be2net: Fix error recovery paths
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
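/* Both parameters are read-only module options set at load time;
 * example values (hypothetical): "modprobe be2net rx_frag_size=4096 num_vfs=4".
 */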

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

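/* Enable/disable the host interrupt bit in the PCICFG MEMBAR interrupt
 * control register. Skipped once an EEH error has been flagged, when
 * further PCI config-space accesses are unsafe.
 */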
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

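/* Doorbell helpers: each queue is kicked by writing its id plus a
 * posted/popped count into that queue's doorbell register. The wmb()
 * orders descriptor updates in memory ahead of the doorbell write so
 * the device never sees stale descriptors.
 */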
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

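/* ndo_set_mac_address: query the MAC currently programmed for this
 * interface; if the requested address differs, add the new pmac entry
 * before deleting the old one so a valid filter is always in place.
 */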
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

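/* HW stats arrive from firmware in a generation-specific layout (v0 on
 * BE2, v1 on BE3, pport stats on Lancer). The populate_* helpers below
 * copy the fields the driver reports into the generation-neutral
 * adapter->drv_stats.
 */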
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

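/* The ERX drops-no-frags counter in hardware is only 16 bits wide and
 * wraps at 65535, so the driver folds it into a 32-bit accumulator.
 * Worked example: if *acc == 0x0001FFF0 and the HW now reports 0x0005,
 * the value has wrapped, and the accumulator becomes 0x00020005.
 */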
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

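/* ndo_get_stats64: per-queue packet/byte totals are read inside a
 * u64_stats sync section so 64-bit counters stay consistent even on
 * 32-bit hosts; the error counters come from the last firmware stats
 * snapshot parsed by be_parse_stats().
 */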
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

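/* A TX request occupies one header WRB, one fragment WRB per DMA
 * buffer and, on BE2/BE3 (but not Lancer), an extra dummy WRB whenever
 * the total would otherwise be odd, since those ASICs expect an even
 * WRB count per request.
 */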
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

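/* Map the skb head and each page fragment for DMA and emit one
 * fragment WRB per mapping. If any mapping fails, rewind the queue
 * head, unmap everything mapped so far and return 0 so the caller can
 * drop the skb.
 */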
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

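/* ndo_set_rx_mode: select promiscuous, all-multicast or exact
 * multicast filtering from the netdev flags; when more multicast
 * groups are configured than the HW supports (BE_MAX_MC), fall back
 * to all-multicast.
 */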
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

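/* SR-IOV ndo callbacks: apply MAC, VLAN and TX-rate settings to a VF
 * on the VF's behalf. Each one rejects the request unless SR-IOV is
 * enabled and the VF index is in range.
 */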
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

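/* Adaptive interrupt coalescing: once a second, compute the RX
 * packets/sec rate, derive an EQ delay (eqd) from it, clamp the result
 * to [min_eqd, max_eqd] and reprogram it only when the value changes.
 */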
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

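/* RX buffers are fragments carved out of compound pages; the
 * page_info_tbl entry for each fragment holds its page reference and
 * DMA address. The page is DMA-unmapped only when its last fragment
 * (last_page_user) is consumed.
 */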
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

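/* The RX completion descriptor layout differs between the v0 format
 * and the v1 (BE3 native) format. The two parsers below normalize the
 * fields into struct be_rx_compl_info so the rest of the RX path is
 * format-agnostic.
 */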
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

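/* TX completion path: a CQ entry is valid while its 'valid' dword is
 * non-zero. The entry is read behind an rmb(), byte-swapped, and the
 * valid dword is cleared so the slot reads as empty when the CQ wraps
 * around.
 */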
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

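/* On teardown, wait up to ~200ms for outstanding TX completions to
 * arrive; any WRBs still posted after that are unmapped and their
 * skbs freed by hand so no DMA mappings or memory leak across a reset.
 */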
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

5fb379ee
SP
1572static void be_mcc_queues_destroy(struct be_adapter *adapter)
1573{
1574 struct be_queue_info *q;
5fb379ee 1575
8788fdc2 1576 q = &adapter->mcc_obj.q;
5fb379ee 1577 if (q->created)
8788fdc2 1578 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1579 be_queue_free(adapter, q);
1580
8788fdc2 1581 q = &adapter->mcc_obj.cq;
5fb379ee 1582 if (q->created)
8788fdc2 1583 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1584 be_queue_free(adapter, q);
1585}
1586
1587/* Must be called only after TX qs are created as MCC shares TX EQ */
1588static int be_mcc_queues_create(struct be_adapter *adapter)
1589{
1590 struct be_queue_info *q, *cq;
5fb379ee
SP
1591
1592 /* Alloc MCC compl queue */
8788fdc2 1593 cq = &adapter->mcc_obj.cq;
5fb379ee 1594 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1595 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1596 goto err;
1597
1598 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1599 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
5fb379ee
SP
1600 goto mcc_cq_free;
1601
1602 /* Alloc MCC queue */
8788fdc2 1603 q = &adapter->mcc_obj.q;
5fb379ee
SP
1604 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1605 goto mcc_cq_destroy;
1606
1607 /* Ask BE to create MCC queue */
8788fdc2 1608 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1609 goto mcc_q_free;
1610
1611 return 0;
1612
1613mcc_q_free:
1614 be_queue_free(adapter, q);
1615mcc_cq_destroy:
8788fdc2 1616 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1617mcc_cq_free:
1618 be_queue_free(adapter, cq);
1619err:
1620 return -1;
1621}
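/* Usage sketch of the ordering constraint noted above: the MCC CQ rides on
 * the TX EQ, so TX queue creation must succeed first. Error unwinding is
 * elided; this mirrors the be_setup() flow and adds nothing new.
 */
static int be_queues_create_ordered(struct be_adapter *adapter)
{
	int status;

	status = be_tx_queues_create(adapter);	/* allocates adapter->tx_eq */
	if (status)
		return status;

	return be_mcc_queues_create(adapter);	/* shares adapter->tx_eq */
}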
1622
6b7c5b94
SP
1623static void be_tx_queues_destroy(struct be_adapter *adapter)
1624{
1625 struct be_queue_info *q;
3c8def97
SP
1626 struct be_tx_obj *txo;
1627 u8 i;
6b7c5b94 1628
3c8def97
SP
1629 for_all_tx_queues(adapter, txo, i) {
1630 q = &txo->q;
1631 if (q->created)
1632 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1633 be_queue_free(adapter, q);
6b7c5b94 1634
3c8def97
SP
1635 q = &txo->cq;
1636 if (q->created)
1637 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1638 be_queue_free(adapter, q);
1639 }
6b7c5b94 1640
859b1e4e
SP
1641 /* Clear any residual events */
1642 be_eq_clean(adapter, &adapter->tx_eq);
1643
6b7c5b94
SP
1644 q = &adapter->tx_eq.q;
1645 if (q->created)
8788fdc2 1646 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
6b7c5b94
SP
1647 be_queue_free(adapter, q);
1648}
1649
dafc0fe3
SP
1650static int be_num_txqs_want(struct be_adapter *adapter)
1651{
1652 if ((num_vfs && adapter->sriov_enabled) ||
752961a1 1653 be_is_mc(adapter) ||
dafc0fe3
SP
1654 lancer_chip(adapter) || !be_physfn(adapter) ||
1655 adapter->generation == BE_GEN2)
1656 return 1;
1657 else
1658 return MAX_TX_QS;
1659}
1660
3c8def97 1661/* One TX event queue is shared by all TX compl qs */
6b7c5b94
SP
1662static int be_tx_queues_create(struct be_adapter *adapter)
1663{
1664 struct be_queue_info *eq, *q, *cq;
3c8def97
SP
1665 struct be_tx_obj *txo;
1666 u8 i;
6b7c5b94 1667
dafc0fe3 1668 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1669 if (adapter->num_tx_qs != MAX_TX_QS) {
1670 rtnl_lock();
dafc0fe3
SP
1671 netif_set_real_num_tx_queues(adapter->netdev,
1672 adapter->num_tx_qs);
3bb62f4f
PR
1673 rtnl_unlock();
1674 }
dafc0fe3 1675
6b7c5b94
SP
1676 adapter->tx_eq.max_eqd = 0;
1677 adapter->tx_eq.min_eqd = 0;
1678 adapter->tx_eq.cur_eqd = 96;
1679 adapter->tx_eq.enable_aic = false;
3c8def97 1680
6b7c5b94 1681 eq = &adapter->tx_eq.q;
3c8def97
SP
1682 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1683 sizeof(struct be_eq_entry)))
6b7c5b94
SP
1684 return -1;
1685
8788fdc2 1686 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
3c8def97 1687 goto err;
ecd62107 1688 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1689
3c8def97
SP
1690 for_all_tx_queues(adapter, txo, i) {
1691 cq = &txo->cq;
1692 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
6b7c5b94 1693 sizeof(struct be_eth_tx_compl)))
3c8def97 1694 goto err;
6b7c5b94 1695
3c8def97
SP
1696 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1697 goto err;
6b7c5b94 1698
3c8def97
SP
1699 q = &txo->q;
1700 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1701 sizeof(struct be_eth_wrb)))
1702 goto err;
3c8def97 1703 }
6b7c5b94
SP
1704 return 0;
1705
3c8def97
SP
1706err:
1707 be_tx_queues_destroy(adapter);
6b7c5b94
SP
1708 return -1;
1709}
1710
1711static void be_rx_queues_destroy(struct be_adapter *adapter)
1712{
1713 struct be_queue_info *q;
3abcdeda
SP
1714 struct be_rx_obj *rxo;
1715 int i;
1716
1717 for_all_rx_queues(adapter, rxo, i) {
482c9e79 1718 be_queue_free(adapter, &rxo->q);
3abcdeda
SP
1719
1720 q = &rxo->cq;
1721 if (q->created)
1722 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1723 be_queue_free(adapter, q);
1724
3abcdeda 1725 q = &rxo->rx_eq.q;
482c9e79 1726 if (q->created)
3abcdeda 1727 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
3abcdeda 1728 be_queue_free(adapter, q);
6b7c5b94 1729 }
6b7c5b94
SP
1730}
1731
ac6a0c4a
SP
1732static u32 be_num_rxqs_want(struct be_adapter *adapter)
1733{
c814fd36 1734 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
752961a1
SP
1735 !adapter->sriov_enabled && be_physfn(adapter) &&
1736 !be_is_mc(adapter)) {
ac6a0c4a
SP
1737 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1738 } else {
1739 dev_warn(&adapter->pdev->dev,
1740 "No support for multiple RX queues\n");
1741 return 1;
1742 }
1743}
1744
6b7c5b94
SP
1745static int be_rx_queues_create(struct be_adapter *adapter)
1746{
1747 struct be_queue_info *eq, *q, *cq;
3abcdeda
SP
1748 struct be_rx_obj *rxo;
1749 int rc, i;
6b7c5b94 1750
ac6a0c4a
SP
1751 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1752 msix_enabled(adapter) ?
1753 adapter->num_msix_vec - 1 : 1);
1754 if (adapter->num_rx_qs != MAX_RX_QS)
 1755 dev_warn(&adapter->pdev->dev,
 1756 "Can create only %d RX queues\n", adapter->num_rx_qs);
1757
6b7c5b94 1758 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
1759 for_all_rx_queues(adapter, rxo, i) {
1760 rxo->adapter = adapter;
1761 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1762 rxo->rx_eq.enable_aic = true;
1763
1764 /* EQ */
1765 eq = &rxo->rx_eq.q;
1766 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1767 sizeof(struct be_eq_entry));
1768 if (rc)
1769 goto err;
1770
1771 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1772 if (rc)
1773 goto err;
1774
ecd62107 1775 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1776
3abcdeda
SP
1777 /* CQ */
1778 cq = &rxo->cq;
1779 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1780 sizeof(struct be_eth_rx_compl));
1781 if (rc)
1782 goto err;
1783
1784 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1785 if (rc)
1786 goto err;
482c9e79
SP
1787
1788 /* Rx Q - will be created in be_open() */
3abcdeda
SP
1789 q = &rxo->q;
1790 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1791 sizeof(struct be_eth_rx_d));
1792 if (rc)
1793 goto err;
1794
3abcdeda 1795 }
6b7c5b94
SP
1796
1797 return 0;
3abcdeda
SP
1798err:
1799 be_rx_queues_destroy(adapter);
1800 return -1;
6b7c5b94 1801}
6b7c5b94 1802
fe6d2a38 1803static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1804{
fe6d2a38
SP
1805 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1806 if (!eqe->evt)
1807 return false;
1808 else
1809 return true;
b628bde2
SP
1810}
1811
6b7c5b94
SP
1812static irqreturn_t be_intx(int irq, void *dev)
1813{
1814 struct be_adapter *adapter = dev;
3abcdeda 1815 struct be_rx_obj *rxo;
fe6d2a38 1816 int isr, i, tx = 0 , rx = 0;
6b7c5b94 1817
fe6d2a38
SP
1818 if (lancer_chip(adapter)) {
1819 if (event_peek(&adapter->tx_eq))
3c8def97 1820 tx = event_handle(adapter, &adapter->tx_eq, false);
fe6d2a38
SP
1821 for_all_rx_queues(adapter, rxo, i) {
1822 if (event_peek(&rxo->rx_eq))
3c8def97 1823 rx |= event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1824 }
6b7c5b94 1825
fe6d2a38
SP
1826 if (!(tx || rx))
1827 return IRQ_NONE;
3abcdeda 1828
fe6d2a38
SP
1829 } else {
1830 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1831 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1832 if (!isr)
1833 return IRQ_NONE;
1834
ecd62107 1835 if ((1 << adapter->tx_eq.eq_idx & isr))
3c8def97 1836 event_handle(adapter, &adapter->tx_eq, false);
fe6d2a38
SP
1837
1838 for_all_rx_queues(adapter, rxo, i) {
ecd62107 1839 if ((1 << rxo->rx_eq.eq_idx & isr))
3c8def97 1840 event_handle(adapter, &rxo->rx_eq, true);
fe6d2a38 1841 }
3abcdeda 1842 }
c001c213 1843
8788fdc2 1844 return IRQ_HANDLED;
6b7c5b94
SP
1845}
1846
1847static irqreturn_t be_msix_rx(int irq, void *dev)
1848{
3abcdeda
SP
1849 struct be_rx_obj *rxo = dev;
1850 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1851
3c8def97 1852 event_handle(adapter, &rxo->rx_eq, true);
6b7c5b94
SP
1853
1854 return IRQ_HANDLED;
1855}
1856
5fb379ee 1857static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
6b7c5b94
SP
1858{
1859 struct be_adapter *adapter = dev;
1860
3c8def97 1861 event_handle(adapter, &adapter->tx_eq, false);
6b7c5b94
SP
1862
1863 return IRQ_HANDLED;
1864}
1865
2e588f84 1866static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1867{
2e588f84 1868 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
1869}
1870
49b05221 1871static int be_poll_rx(struct napi_struct *napi, int budget)
6b7c5b94
SP
1872{
1873 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
3abcdeda
SP
1874 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1875 struct be_adapter *adapter = rxo->adapter;
1876 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1877 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1878 u32 work_done;
1879
ac124ff9 1880 rx_stats(rxo)->rx_polls++;
6b7c5b94 1881 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1882 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1883 if (!rxcp)
1884 break;
1885
12004ae9
SP
 1886 /* Is it a flush compl that has no data? */
1887 if (unlikely(rxcp->num_rcvd == 0))
1888 goto loop_continue;
1889
 1890 /* Discard compls with partial DMA (Lancer B0) */
1891 if (unlikely(!rxcp->pkt_size)) {
1892 be_rx_compl_discard(adapter, rxo, rxcp);
1893 goto loop_continue;
1894 }
1895
 1896 /* On BE, drop pkts that arrive due to imperfect filtering in
 1897 * promiscuous mode on some SKUs
1898 */
1899 if (unlikely(rxcp->port != adapter->port_num &&
1900 !lancer_chip(adapter))) {
009dd872 1901 be_rx_compl_discard(adapter, rxo, rxcp);
12004ae9 1902 goto loop_continue;
64642811 1903 }
009dd872 1904
12004ae9
SP
1905 if (do_gro(rxcp))
1906 be_rx_compl_process_gro(adapter, rxo, rxcp);
1907 else
1908 be_rx_compl_process(adapter, rxo, rxcp);
1909loop_continue:
2e588f84 1910 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
1911 }
1912
9372cacb
PR
1913 be_cq_notify(adapter, rx_cq->id, false, work_done);
1914
6b7c5b94 1915 /* Refill the queue */
857c9905 1916 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1917 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94
SP
1918
1919 /* All consumed */
1920 if (work_done < budget) {
1921 napi_complete(napi);
9372cacb
PR
1922 /* Arm CQ */
1923 be_cq_notify(adapter, rx_cq->id, true, 0);
6b7c5b94
SP
1924 }
1925 return work_done;
1926}
1927
f31e50a8
SP
 1928/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1929 * For TX/MCC we don't honour budget; consume everything
1930 */
1931static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1932{
f31e50a8
SP
1933 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1934 struct be_adapter *adapter =
1935 container_of(tx_eq, struct be_adapter, tx_eq);
3c8def97 1936 struct be_tx_obj *txo;
6b7c5b94 1937 struct be_eth_tx_compl *txcp;
3c8def97
SP
1938 int tx_compl, mcc_compl, status = 0;
1939 u8 i;
1940 u16 num_wrbs;
1941
1942 for_all_tx_queues(adapter, txo, i) {
1943 tx_compl = 0;
1944 num_wrbs = 0;
1945 while ((txcp = be_tx_compl_get(&txo->cq))) {
1946 num_wrbs += be_tx_compl_process(adapter, txo,
1947 AMAP_GET_BITS(struct amap_eth_tx_compl,
1948 wrb_index, txcp));
1949 tx_compl++;
1950 }
1951 if (tx_compl) {
1952 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1953
1954 atomic_sub(num_wrbs, &txo->q.used);
6b7c5b94 1955
3c8def97
SP
1956 /* As Tx wrbs have been freed up, wake up netdev queue
1957 * if it was stopped due to lack of tx wrbs. */
1958 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1959 atomic_read(&txo->q.used) < txo->q.len / 2) {
1960 netif_wake_subqueue(adapter->netdev, i);
1961 }
1962
ab1594e9 1963 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
ac124ff9 1964 tx_stats(txo)->tx_compl += tx_compl;
ab1594e9 1965 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3c8def97 1966 }
6b7c5b94
SP
1967 }
1968
f31e50a8
SP
1969 mcc_compl = be_process_mcc(adapter, &status);
1970
f31e50a8
SP
1971 if (mcc_compl) {
1972 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1973 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1974 }
1975
3c8def97 1976 napi_complete(napi);
6b7c5b94 1977
3c8def97 1978 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
ab1594e9 1979 adapter->drv_stats.tx_events++;
6b7c5b94
SP
1980 return 1;
1981}
1982
d053de91 1983void be_detect_dump_ue(struct be_adapter *adapter)
7c185276 1984{
e1cfb67a
PR
1985 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1986 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
1987 u32 i;
1988
72f02485
SP
1989 if (adapter->eeh_err || adapter->ue_detected)
1990 return;
1991
e1cfb67a
PR
1992 if (lancer_chip(adapter)) {
1993 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1994 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1995 sliport_err1 = ioread32(adapter->db +
1996 SLIPORT_ERROR1_OFFSET);
1997 sliport_err2 = ioread32(adapter->db +
1998 SLIPORT_ERROR2_OFFSET);
1999 }
2000 } else {
2001 pci_read_config_dword(adapter->pdev,
2002 PCICFG_UE_STATUS_LOW, &ue_lo);
2003 pci_read_config_dword(adapter->pdev,
2004 PCICFG_UE_STATUS_HIGH, &ue_hi);
2005 pci_read_config_dword(adapter->pdev,
2006 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2007 pci_read_config_dword(adapter->pdev,
2008 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2009
2010 ue_lo = (ue_lo & (~ue_lo_mask));
2011 ue_hi = (ue_hi & (~ue_hi_mask));
2012 }
7c185276 2013
e1cfb67a
PR
2014 if (ue_lo || ue_hi ||
2015 sliport_status & SLIPORT_STATUS_ERR_MASK) {
d053de91 2016 adapter->ue_detected = true;
7acc2087 2017 adapter->eeh_err = true;
434b3648
SP
2018 dev_err(&adapter->pdev->dev,
2019 "Unrecoverable error in the card\n");
d053de91
AK
2020 }
2021
e1cfb67a
PR
2022 if (ue_lo) {
2023 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2024 if (ue_lo & 1)
7c185276
AK
2025 dev_err(&adapter->pdev->dev,
2026 "UE: %s bit set\n", ue_status_low_desc[i]);
2027 }
2028 }
e1cfb67a
PR
2029 if (ue_hi) {
2030 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2031 if (ue_hi & 1)
7c185276
AK
2032 dev_err(&adapter->pdev->dev,
2033 "UE: %s bit set\n", ue_status_hi_desc[i]);
2034 }
2035 }
2036
e1cfb67a
PR
2037 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2038 dev_err(&adapter->pdev->dev,
2039 "sliport status 0x%x\n", sliport_status);
2040 dev_err(&adapter->pdev->dev,
2041 "sliport error1 0x%x\n", sliport_err1);
2042 dev_err(&adapter->pdev->dev,
2043 "sliport error2 0x%x\n", sliport_err2);
2044 }
7c185276
AK
2045}
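/* The per-bit decode loops above, factored into one sketch: walk a 32-bit
 * status word and print the descriptor for every set bit. 'desc' is assumed
 * to hold one string per bit, as ue_status_low_desc[] and
 * ue_status_hi_desc[] do. The helper name is hypothetical.
 */
static void be_dump_status_bits(struct device *dev, u32 status,
				const char * const desc[])
{
	int i;

	for (i = 0; status; status >>= 1, i++)
		if (status & 1)
			dev_err(dev, "UE: %s bit set\n", desc[i]);
}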
2046
ea1dae11
SP
2047static void be_worker(struct work_struct *work)
2048{
2049 struct be_adapter *adapter =
2050 container_of(work, struct be_adapter, work.work);
3abcdeda
SP
2051 struct be_rx_obj *rxo;
2052 int i;
ea1dae11 2053
72f02485 2054 be_detect_dump_ue(adapter);
16da8250 2055
f203af70
SK
2056 /* when interrupts are not yet enabled, just reap any pending
2057 * mcc completions */
2058 if (!netif_running(adapter->netdev)) {
2059 int mcc_compl, status = 0;
2060
2061 mcc_compl = be_process_mcc(adapter, &status);
2062
2063 if (mcc_compl) {
2064 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2065 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2066 }
9b037f38 2067
f203af70
SK
2068 goto reschedule;
2069 }
2070
005d5696
SX
2071 if (!adapter->stats_cmd_sent) {
2072 if (lancer_chip(adapter))
2073 lancer_cmd_get_pport_stats(adapter,
2074 &adapter->stats_cmd);
2075 else
2076 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2077 }
3c8def97 2078
3abcdeda 2079 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2080 be_rx_eqd_update(adapter, rxo);
2081
2082 if (rxo->rx_post_starved) {
2083 rxo->rx_post_starved = false;
1829b086 2084 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 2085 }
ea1dae11
SP
2086 }
2087
f203af70 2088reschedule:
e74fbd03 2089 adapter->work_counter++;
ea1dae11
SP
2090 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2091}
2092
8d56ff11
SP
2093static void be_msix_disable(struct be_adapter *adapter)
2094{
ac6a0c4a 2095 if (msix_enabled(adapter)) {
8d56ff11 2096 pci_disable_msix(adapter->pdev);
ac6a0c4a 2097 adapter->num_msix_vec = 0;
3abcdeda
SP
2098 }
2099}
2100
6b7c5b94
SP
2101static void be_msix_enable(struct be_adapter *adapter)
2102{
3abcdeda 2103#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 2104 int i, status, num_vec;
6b7c5b94 2105
ac6a0c4a 2106 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 2107
ac6a0c4a 2108 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2109 adapter->msix_entries[i].entry = i;
2110
ac6a0c4a 2111 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2112 if (status == 0) {
2113 goto done;
2114 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2115 num_vec = status;
3abcdeda 2116 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2117 num_vec) == 0)
3abcdeda 2118 goto done;
3abcdeda
SP
2119 }
2120 return;
2121done:
ac6a0c4a
SP
2122 adapter->num_msix_vec = num_vec;
2123 return;
6b7c5b94
SP
2124}
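/* A sketch of the negotiation contract be_msix_enable() relies on: with the
 * legacy pci_enable_msix() API, a positive return value is the vector count
 * the platform can actually grant, so the request is retried once with that
 * smaller count. The helper, its return convention, and the reuse of
 * BE_MIN_MSIX_VECTORS outside its defining function are assumptions.
 */
static int be_msix_try_enable(struct pci_dev *pdev,
			      struct msix_entry *entries, int want)
{
	int avail = pci_enable_msix(pdev, entries, want);

	if (avail == 0)
		return want;		/* got all requested vectors */

	/* positive => only 'avail' vectors available; retry with that */
	if (avail >= BE_MIN_MSIX_VECTORS &&
	    pci_enable_msix(pdev, entries, avail) == 0)
		return avail;

	return 0;			/* fall back to INTx */
}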
2125
f9449ab7 2126static int be_sriov_enable(struct be_adapter *adapter)
ba343c77 2127{
344dbf10 2128 be_check_sriov_fn_type(adapter);
6dedec81 2129#ifdef CONFIG_PCI_IOV
ba343c77 2130 if (be_physfn(adapter) && num_vfs) {
81be8f0a
AK
2131 int status, pos;
2132 u16 nvfs;
2133
2134 pos = pci_find_ext_capability(adapter->pdev,
2135 PCI_EXT_CAP_ID_SRIOV);
2136 pci_read_config_word(adapter->pdev,
2137 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2138
2139 if (num_vfs > nvfs) {
2140 dev_info(&adapter->pdev->dev,
2141 "Device supports %d VFs and not %d\n",
2142 nvfs, num_vfs);
2143 num_vfs = nvfs;
2144 }
6dedec81 2145
ba343c77
SB
2146 status = pci_enable_sriov(adapter->pdev, num_vfs);
2147 adapter->sriov_enabled = status ? false : true;
f9449ab7
SP
2148
2149 if (adapter->sriov_enabled) {
2150 adapter->vf_cfg = kcalloc(num_vfs,
2151 sizeof(struct be_vf_cfg),
2152 GFP_KERNEL);
2153 if (!adapter->vf_cfg)
2154 return -ENOMEM;
2155 }
ba343c77
SB
2156 }
2157#endif
f9449ab7 2158 return 0;
ba343c77
SB
2159}
2160
2161static void be_sriov_disable(struct be_adapter *adapter)
2162{
2163#ifdef CONFIG_PCI_IOV
2164 if (adapter->sriov_enabled) {
2165 pci_disable_sriov(adapter->pdev);
f9449ab7 2166 kfree(adapter->vf_cfg);
ba343c77
SB
2167 adapter->sriov_enabled = false;
2168 }
2169#endif
2170}
2171
fe6d2a38
SP
2172static inline int be_msix_vec_get(struct be_adapter *adapter,
2173 struct be_eq_obj *eq_obj)
6b7c5b94 2174{
ecd62107 2175 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
2176}
2177
b628bde2
SP
2178static int be_request_irq(struct be_adapter *adapter,
2179 struct be_eq_obj *eq_obj,
3abcdeda 2180 void *handler, char *desc, void *context)
6b7c5b94
SP
2181{
2182 struct net_device *netdev = adapter->netdev;
b628bde2
SP
2183 int vec;
2184
2185 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2186 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2187 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
2188}
2189
3abcdeda
SP
2190static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2191 void *context)
b628bde2 2192{
fe6d2a38 2193 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2194 free_irq(vec, context);
b628bde2 2195}
6b7c5b94 2196
b628bde2
SP
2197static int be_msix_register(struct be_adapter *adapter)
2198{
3abcdeda
SP
2199 struct be_rx_obj *rxo;
2200 int status, i;
2201 char qname[10];
b628bde2 2202
3abcdeda
SP
2203 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2204 adapter);
6b7c5b94
SP
2205 if (status)
2206 goto err;
2207
3abcdeda
SP
2208 for_all_rx_queues(adapter, rxo, i) {
2209 sprintf(qname, "rxq%d", i);
2210 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2211 qname, rxo);
2212 if (status)
2213 goto err_msix;
2214 }
b628bde2 2215
6b7c5b94 2216 return 0;
b628bde2 2217
3abcdeda
SP
2218err_msix:
2219 be_free_irq(adapter, &adapter->tx_eq, adapter);
2220
2221 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2222 be_free_irq(adapter, &rxo->rx_eq, rxo);
2223
6b7c5b94
SP
2224err:
2225 dev_warn(&adapter->pdev->dev,
2226 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2227 be_msix_disable(adapter);
6b7c5b94
SP
2228 return status;
2229}
2230
2231static int be_irq_register(struct be_adapter *adapter)
2232{
2233 struct net_device *netdev = adapter->netdev;
2234 int status;
2235
ac6a0c4a 2236 if (msix_enabled(adapter)) {
6b7c5b94
SP
2237 status = be_msix_register(adapter);
2238 if (status == 0)
2239 goto done;
ba343c77
SB
2240 /* INTx is not supported for VF */
2241 if (!be_physfn(adapter))
2242 return status;
6b7c5b94
SP
2243 }
2244
2245 /* INTx */
2246 netdev->irq = adapter->pdev->irq;
2247 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2248 adapter);
2249 if (status) {
2250 dev_err(&adapter->pdev->dev,
2251 "INTx request IRQ failed - err %d\n", status);
2252 return status;
2253 }
2254done:
2255 adapter->isr_registered = true;
2256 return 0;
2257}
2258
2259static void be_irq_unregister(struct be_adapter *adapter)
2260{
2261 struct net_device *netdev = adapter->netdev;
3abcdeda
SP
2262 struct be_rx_obj *rxo;
2263 int i;
6b7c5b94
SP
2264
2265 if (!adapter->isr_registered)
2266 return;
2267
2268 /* INTx */
ac6a0c4a 2269 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2270 free_irq(netdev->irq, adapter);
2271 goto done;
2272 }
2273
2274 /* MSIx */
3abcdeda
SP
2275 be_free_irq(adapter, &adapter->tx_eq, adapter);
2276
2277 for_all_rx_queues(adapter, rxo, i)
2278 be_free_irq(adapter, &rxo->rx_eq, rxo);
2279
6b7c5b94
SP
2280done:
2281 adapter->isr_registered = false;
6b7c5b94
SP
2282}
2283
482c9e79
SP
2284static void be_rx_queues_clear(struct be_adapter *adapter)
2285{
2286 struct be_queue_info *q;
2287 struct be_rx_obj *rxo;
2288 int i;
2289
2290 for_all_rx_queues(adapter, rxo, i) {
2291 q = &rxo->q;
2292 if (q->created) {
2293 be_cmd_rxq_destroy(adapter, q);
2294 /* After the rxq is invalidated, wait for a grace time
 2295 * of 1ms for all DMA to end and the flush compl to
2296 * arrive
2297 */
2298 mdelay(1);
2299 be_rx_q_clean(adapter, rxo);
2300 }
2301
2302 /* Clear any residual events */
2303 q = &rxo->rx_eq.q;
2304 if (q->created)
2305 be_eq_clean(adapter, &rxo->rx_eq);
2306 }
2307}
2308
889cd4b2
SP
2309static int be_close(struct net_device *netdev)
2310{
2311 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2312 struct be_rx_obj *rxo;
3c8def97 2313 struct be_tx_obj *txo;
889cd4b2 2314 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2315 int vec, i;
889cd4b2 2316
889cd4b2
SP
2317 be_async_mcc_disable(adapter);
2318
fe6d2a38
SP
2319 if (!lancer_chip(adapter))
2320 be_intr_set(adapter, false);
889cd4b2 2321
63fcb27f
PR
2322 for_all_rx_queues(adapter, rxo, i)
2323 napi_disable(&rxo->rx_eq.napi);
2324
2325 napi_disable(&tx_eq->napi);
2326
2327 if (lancer_chip(adapter)) {
63fcb27f
PR
2328 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2329 for_all_rx_queues(adapter, rxo, i)
2330 be_cq_notify(adapter, rxo->cq.id, false, 0);
3c8def97
SP
2331 for_all_tx_queues(adapter, txo, i)
2332 be_cq_notify(adapter, txo->cq.id, false, 0);
63fcb27f
PR
2333 }
2334
ac6a0c4a 2335 if (msix_enabled(adapter)) {
fe6d2a38 2336 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2337 synchronize_irq(vec);
3abcdeda
SP
2338
2339 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2340 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2341 synchronize_irq(vec);
2342 }
889cd4b2
SP
2343 } else {
2344 synchronize_irq(netdev->irq);
2345 }
2346 be_irq_unregister(adapter);
2347
889cd4b2
SP
2348 /* Wait for all pending tx completions to arrive so that
2349 * all tx skbs are freed.
2350 */
3c8def97
SP
2351 for_all_tx_queues(adapter, txo, i)
2352 be_tx_compl_clean(adapter, txo);
889cd4b2 2353
482c9e79
SP
2354 be_rx_queues_clear(adapter);
2355 return 0;
2356}
2357
2358static int be_rx_queues_setup(struct be_adapter *adapter)
2359{
2360 struct be_rx_obj *rxo;
2361 int rc, i;
2362 u8 rsstable[MAX_RSS_QS];
2363
2364 for_all_rx_queues(adapter, rxo, i) {
2365 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2366 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2367 adapter->if_handle,
2368 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2369 if (rc)
2370 return rc;
2371 }
2372
2373 if (be_multi_rxq(adapter)) {
2374 for_all_rss_queues(adapter, rxo, i)
2375 rsstable[i] = rxo->rss_id;
2376
2377 rc = be_cmd_rss_config(adapter, rsstable,
2378 adapter->num_rx_qs - 1);
2379 if (rc)
2380 return rc;
2381 }
2382
2383 /* First time posting */
2384 for_all_rx_queues(adapter, rxo, i) {
2385 be_post_rx_frags(rxo, GFP_KERNEL);
2386 napi_enable(&rxo->rx_eq.napi);
2387 }
889cd4b2
SP
2388 return 0;
2389}
2390
6b7c5b94
SP
2391static int be_open(struct net_device *netdev)
2392{
2393 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2394 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2395 struct be_rx_obj *rxo;
3abcdeda 2396 int status, i;
5fb379ee 2397
482c9e79
SP
2398 status = be_rx_queues_setup(adapter);
2399 if (status)
2400 goto err;
2401
5fb379ee
SP
2402 napi_enable(&tx_eq->napi);
2403
2404 be_irq_register(adapter);
2405
fe6d2a38
SP
2406 if (!lancer_chip(adapter))
2407 be_intr_set(adapter, true);
5fb379ee
SP
2408
2409 /* The evt queues are created in unarmed state; arm them */
3abcdeda
SP
2410 for_all_rx_queues(adapter, rxo, i) {
2411 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2412 be_cq_notify(adapter, rxo->cq.id, true, 0);
2413 }
8788fdc2 2414 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2415
7a1e9b20
SP
 2416 /* Now that interrupts are on, we can process async mcc */
2417 be_async_mcc_enable(adapter);
2418
889cd4b2
SP
2419 return 0;
2420err:
2421 be_close(adapter->netdev);
2422 return -EIO;
5fb379ee
SP
2423}
2424
71d8d1b5
AK
2425static int be_setup_wol(struct be_adapter *adapter, bool enable)
2426{
2427 struct be_dma_mem cmd;
2428 int status = 0;
2429 u8 mac[ETH_ALEN];
2430
2431 memset(mac, 0, ETH_ALEN);
2432
2433 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2434 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2435 GFP_KERNEL);
71d8d1b5
AK
2436 if (cmd.va == NULL)
2437 return -1;
2438 memset(cmd.va, 0, cmd.size);
2439
2440 if (enable) {
2441 status = pci_write_config_dword(adapter->pdev,
2442 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2443 if (status) {
2444 dev_err(&adapter->pdev->dev,
2381a55c 2445 "Could not enable Wake-on-LAN\n");
2b7bcebf
IV
2446 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2447 cmd.dma);
71d8d1b5
AK
2448 return status;
2449 }
2450 status = be_cmd_enable_magic_wol(adapter,
2451 adapter->netdev->dev_addr, &cmd);
2452 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2453 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2454 } else {
2455 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2456 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2457 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2458 }
2459
2b7bcebf 2460 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2461 return status;
2462}
2463
6d87f5c3
AK
2464/*
2465 * Generate a seed MAC address from the PF MAC Address using jhash.
 2466 * MAC addresses for VFs are assigned incrementally starting from the seed.
2467 * These addresses are programmed in the ASIC by the PF and the VF driver
2468 * queries for the MAC address during its probe.
2469 */
2470static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2471{
f9449ab7 2472 u32 vf;
3abcdeda 2473 int status = 0;
6d87f5c3
AK
2474 u8 mac[ETH_ALEN];
2475
2476 be_vf_eth_addr_generate(adapter, mac);
2477
2478 for (vf = 0; vf < num_vfs; vf++) {
590c391d
PR
2479 if (lancer_chip(adapter)) {
2480 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2481 } else {
2482 status = be_cmd_pmac_add(adapter, mac,
6d87f5c3 2483 adapter->vf_cfg[vf].vf_if_handle,
f8617e08
AK
2484 &adapter->vf_cfg[vf].vf_pmac_id,
2485 vf + 1);
590c391d
PR
2486 }
2487
6d87f5c3
AK
2488 if (status)
2489 dev_err(&adapter->pdev->dev,
590c391d 2490 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3
AK
2491 else
2492 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2493
2494 mac[5] += 1;
2495 }
2496 return status;
2497}
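/* A hedged sketch of the seed generation described in the comment above:
 * hash the PF MAC with jhash and build a locally-administered unicast
 * address from it. Illustration only; the real be_vf_eth_addr_generate()
 * lives elsewhere in the driver and may differ in detail.
 */
static void be_vf_seed_mac_sketch(const u8 pf_mac[ETH_ALEN], u8 mac[ETH_ALEN])
{
	u32 hash = jhash(pf_mac, ETH_ALEN, 0);	/* <linux/jhash.h> */

	mac[0] = 0x02;			/* locally administered, unicast */
	mac[1] = (hash >> 24) & 0xff;
	mac[2] = (hash >> 16) & 0xff;
	mac[3] = (hash >> 8) & 0xff;
	mac[4] = hash & 0xff;
	mac[5] = 0;			/* caller increments this per VF */
}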
2498
f9449ab7 2499static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3
AK
2500{
2501 u32 vf;
2502
590c391d
PR
2503 for (vf = 0; vf < num_vfs; vf++) {
2504 if (lancer_chip(adapter))
2505 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2506 else
2507 be_cmd_pmac_del(adapter,
2508 adapter->vf_cfg[vf].vf_if_handle,
2509 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2510 }
f9449ab7
SP
2511
2512 for (vf = 0; vf < num_vfs; vf++)
30128031
SP
2513 be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
2514 vf + 1);
6d87f5c3
AK
2515}
2516
a54769f5
SP
2517static int be_clear(struct be_adapter *adapter)
2518{
a54769f5 2519 if (be_physfn(adapter) && adapter->sriov_enabled)
f9449ab7
SP
2520 be_vf_clear(adapter);
2521
2522 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2523
2524 be_mcc_queues_destroy(adapter);
2525 be_rx_queues_destroy(adapter);
2526 be_tx_queues_destroy(adapter);
a54769f5
SP
2527
2528 /* tell fw we're done with firing cmds */
2529 be_cmd_fw_clean(adapter);
2530 return 0;
2531}
2532
30128031
SP
2533static void be_vf_setup_init(struct be_adapter *adapter)
2534{
2535 int vf;
2536
2537 for (vf = 0; vf < num_vfs; vf++) {
2538 adapter->vf_cfg[vf].vf_if_handle = -1;
2539 adapter->vf_cfg[vf].vf_pmac_id = -1;
2540 }
2541}
2542
f9449ab7
SP
2543static int be_vf_setup(struct be_adapter *adapter)
2544{
2545 u32 cap_flags, en_flags, vf;
2546 u16 lnk_speed;
2547 int status;
2548
30128031
SP
2549 be_vf_setup_init(adapter);
2550
590c391d
PR
2551 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2552 BE_IF_FLAGS_MULTICAST;
2553
f9449ab7
SP
2554 for (vf = 0; vf < num_vfs; vf++) {
2555 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2556 &adapter->vf_cfg[vf].vf_if_handle,
2557 NULL, vf+1);
2558 if (status)
2559 goto err;
f9449ab7
SP
2560 }
2561
590c391d
PR
2562 status = be_vf_eth_addr_config(adapter);
2563 if (status)
2564 goto err;
f9449ab7
SP
2565
2566 for (vf = 0; vf < num_vfs; vf++) {
2567 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2568 vf + 1);
2569 if (status)
2570 goto err;
2571 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2572 }
2573 return 0;
2574err:
2575 return status;
2576}
2577
30128031
SP
2578static void be_setup_init(struct be_adapter *adapter)
2579{
2580 adapter->vlan_prio_bmap = 0xff;
2581 adapter->link_speed = -1;
2582 adapter->if_handle = -1;
2583 adapter->be3_native = false;
2584 adapter->promiscuous = false;
2585 adapter->eq_next_idx = 0;
2586}
2587
590c391d
PR
2588static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2589{
2590 u32 pmac_id;
2591 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2592 if (status != 0)
2593 goto do_none;
2594 status = be_cmd_mac_addr_query(adapter, mac,
2595 MAC_ADDRESS_TYPE_NETWORK,
2596 false, adapter->if_handle, pmac_id);
2597 if (status != 0)
2598 goto do_none;
2599 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2600 &adapter->pmac_id, 0);
2601do_none:
2602 return status;
2603}
2604
5fb379ee
SP
2605static int be_setup(struct be_adapter *adapter)
2606{
5fb379ee 2607 struct net_device *netdev = adapter->netdev;
f9449ab7 2608 u32 cap_flags, en_flags;
a54769f5 2609 u32 tx_fc, rx_fc;
293c4a7d 2610 int status, i;
ba343c77 2611 u8 mac[ETH_ALEN];
293c4a7d 2612 struct be_tx_obj *txo;
ba343c77 2613
30128031 2614 be_setup_init(adapter);
6b7c5b94 2615
f9449ab7 2616 be_cmd_req_native_mode(adapter);
73d540f2 2617
f9449ab7 2618 status = be_tx_queues_create(adapter);
6b7c5b94 2619 if (status != 0)
a54769f5 2620 goto err;
6b7c5b94 2621
f9449ab7 2622 status = be_rx_queues_create(adapter);
6b7c5b94 2623 if (status != 0)
a54769f5 2624 goto err;
6b7c5b94 2625
f9449ab7 2626 status = be_mcc_queues_create(adapter);
6b7c5b94 2627 if (status != 0)
a54769f5 2628 goto err;
6b7c5b94 2629
f9449ab7
SP
2630 memset(mac, 0, ETH_ALEN);
2631 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
590c391d 2632 true /*permanent */, 0, 0);
f9449ab7
SP
2633 if (status)
2634 return status;
2635 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2636 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2903dd65 2637
f9449ab7
SP
2638 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2639 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2640 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
5d5adb93
PR
2641 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2642
f9449ab7
SP
2643 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2644 cap_flags |= BE_IF_FLAGS_RSS;
2645 en_flags |= BE_IF_FLAGS_RSS;
2646 }
2647 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2648 netdev->dev_addr, &adapter->if_handle,
2649 &adapter->pmac_id, 0);
5fb379ee 2650 if (status != 0)
a54769f5 2651 goto err;
6b7c5b94 2652
293c4a7d
PR
2653 for_all_tx_queues(adapter, txo, i) {
2654 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2655 if (status)
2656 goto err;
2657 }
2658
590c391d
PR
 2659 /* The VF's permanent mac queried from the card is incorrect.
 2660 * For BEx: Query the mac configured by the PF using if_handle
2661 * For Lancer: Get and use mac_list to obtain mac address.
2662 */
2663 if (!be_physfn(adapter)) {
2664 if (lancer_chip(adapter))
2665 status = be_configure_mac_from_list(adapter, mac);
2666 else
2667 status = be_cmd_mac_addr_query(adapter, mac,
2668 MAC_ADDRESS_TYPE_NETWORK, false,
2669 adapter->if_handle, 0);
f9449ab7
SP
2670 if (!status) {
2671 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2672 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2673 }
2674 }
0dffc83e 2675
04b71175 2676 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2677
a54769f5
SP
2678 status = be_vid_config(adapter, false, 0);
2679 if (status)
2680 goto err;
7ab8b0b4 2681
a54769f5 2682 be_set_rx_mode(adapter->netdev);
5fb379ee 2683
a54769f5 2684 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d
PR
2685 /* For Lancer: It is legal for this cmd to fail on VF */
2686 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5 2687 goto err;
590c391d 2688
a54769f5
SP
2689 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2690 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2691 adapter->rx_fc);
590c391d
PR
2692 /* For Lancer: It is legal for this cmd to fail on VF */
2693 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5
SP
2694 goto err;
2695 }
2dc1deb6 2696
a54769f5 2697 pcie_set_readrq(adapter->pdev, 4096);
5fb379ee 2698
f9449ab7
SP
2699 if (be_physfn(adapter) && adapter->sriov_enabled) {
2700 status = be_vf_setup(adapter);
2701 if (status)
2702 goto err;
2703 }
2704
2705 return 0;
a54769f5
SP
2706err:
2707 be_clear(adapter);
2708 return status;
2709}
6b7c5b94 2710
84517482 2711#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2712static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2713 const u8 *p, u32 img_start, int image_size,
2714 int hdr_size)
fa9a6fed
SB
2715{
2716 u32 crc_offset;
2717 u8 flashed_crc[4];
2718 int status;
3f0d4560
AK
2719
2720 crc_offset = hdr_size + img_start + image_size - 4;
2721
fa9a6fed 2722 p += crc_offset;
3f0d4560
AK
2723
2724 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2725 (image_size - 4));
fa9a6fed
SB
2726 if (status) {
2727 dev_err(&adapter->pdev->dev,
2728 "could not get crc from flash, not flashing redboot\n");
2729 return false;
2730 }
2731
 2732 /* update redboot only if CRC does not match */
2733 if (!memcmp(flashed_crc, p, 4))
2734 return false;
2735 else
2736 return true;
fa9a6fed
SB
2737}
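/* The skip-if-identical check above, reduced to its essence: each image
 * carries its CRC in the trailing four bytes, so comparing the flashed CRC
 * against the candidate image's tail decides whether a reflash is needed.
 * A self-contained sketch with hypothetical names:
 */
static bool be_image_needs_reflash(const u8 *image, size_t image_size,
				   const u8 flashed_crc[4])
{
	/* trailing 4 bytes of the image hold its CRC */
	return memcmp(flashed_crc, image + image_size - 4, 4) != 0;
}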
2738
306f1348
SP
2739static bool phy_flashing_required(struct be_adapter *adapter)
2740{
2741 int status = 0;
2742 struct be_phy_info phy_info;
2743
2744 status = be_cmd_get_phy_info(adapter, &phy_info);
2745 if (status)
2746 return false;
2747 if ((phy_info.phy_type == TN_8022) &&
2748 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2749 return true;
2750 }
2751 return false;
2752}
2753
3f0d4560 2754static int be_flash_data(struct be_adapter *adapter,
84517482 2755 const struct firmware *fw,
3f0d4560
AK
2756 struct be_dma_mem *flash_cmd, int num_of_images)
2757
84517482 2758{
3f0d4560
AK
2759 int status = 0, i, filehdr_size = 0;
2760 u32 total_bytes = 0, flash_op;
84517482
AK
2761 int num_bytes;
2762 const u8 *p = fw->data;
2763 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2764 const struct flash_comp *pflashcomp;
9fe96934 2765 int num_comp;
3f0d4560 2766
306f1348 2767 static const struct flash_comp gen3_flash_types[10] = {
3f0d4560
AK
2768 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2769 FLASH_IMAGE_MAX_SIZE_g3},
2770 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2771 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2772 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2773 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2774 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2775 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2776 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2777 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2778 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2779 FLASH_IMAGE_MAX_SIZE_g3},
2780 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2781 FLASH_IMAGE_MAX_SIZE_g3},
2782 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2783 FLASH_IMAGE_MAX_SIZE_g3},
2784 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
306f1348
SP
2785 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2786 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2787 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2788 };
215faf9c 2789 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2790 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2791 FLASH_IMAGE_MAX_SIZE_g2},
2792 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2793 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2794 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2795 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2796 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2797 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2798 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2799 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2800 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2801 FLASH_IMAGE_MAX_SIZE_g2},
2802 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2803 FLASH_IMAGE_MAX_SIZE_g2},
2804 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2805 FLASH_IMAGE_MAX_SIZE_g2}
2806 };
2807
2808 if (adapter->generation == BE_GEN3) {
2809 pflashcomp = gen3_flash_types;
2810 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2811 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2812 } else {
2813 pflashcomp = gen2_flash_types;
2814 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2815 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2816 }
9fe96934
SB
2817 for (i = 0; i < num_comp; i++) {
2818 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2819 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2820 continue;
306f1348
SP
2821 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2822 if (!phy_flashing_required(adapter))
2823 continue;
2824 }
3f0d4560
AK
2825 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2826 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2827 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2828 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2829 continue;
2830 p = fw->data;
2831 p += filehdr_size + pflashcomp[i].offset
2832 + (num_of_images * sizeof(struct image_hdr));
306f1348
SP
2833 if (p + pflashcomp[i].size > fw->data + fw->size)
2834 return -1;
2835 total_bytes = pflashcomp[i].size;
3f0d4560
AK
2836 while (total_bytes) {
2837 if (total_bytes > 32*1024)
2838 num_bytes = 32*1024;
2839 else
2840 num_bytes = total_bytes;
2841 total_bytes -= num_bytes;
306f1348
SP
2842 if (!total_bytes) {
2843 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2844 flash_op = FLASHROM_OPER_PHY_FLASH;
2845 else
2846 flash_op = FLASHROM_OPER_FLASH;
2847 } else {
2848 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2849 flash_op = FLASHROM_OPER_PHY_SAVE;
2850 else
2851 flash_op = FLASHROM_OPER_SAVE;
2852 }
3f0d4560
AK
2853 memcpy(req->params.data_buf, p, num_bytes);
2854 p += num_bytes;
2855 status = be_cmd_write_flashrom(adapter, flash_cmd,
2856 pflashcomp[i].optype, flash_op, num_bytes);
2857 if (status) {
306f1348
SP
2858 if ((status == ILLEGAL_IOCTL_REQ) &&
2859 (pflashcomp[i].optype ==
2860 IMG_TYPE_PHY_FW))
2861 break;
3f0d4560
AK
2862 dev_err(&adapter->pdev->dev,
2863 "cmd to write to flash rom failed.\n");
2864 return -1;
2865 }
84517482 2866 }
84517482 2867 }
84517482
AK
2868 return 0;
2869}
2870
3f0d4560
AK
2871static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2872{
2873 if (fhdr == NULL)
2874 return 0;
2875 if (fhdr->build[0] == '3')
2876 return BE_GEN3;
2877 else if (fhdr->build[0] == '2')
2878 return BE_GEN2;
2879 else
2880 return 0;
2881}
2882
485bf569
SN
2883static int lancer_fw_download(struct be_adapter *adapter,
2884 const struct firmware *fw)
84517482 2885{
485bf569
SN
2886#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2887#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2888 struct be_dma_mem flash_cmd;
485bf569
SN
2889 const u8 *data_ptr = NULL;
2890 u8 *dest_image_ptr = NULL;
2891 size_t image_size = 0;
2892 u32 chunk_size = 0;
2893 u32 data_written = 0;
2894 u32 offset = 0;
2895 int status = 0;
2896 u8 add_status = 0;
84517482 2897
485bf569 2898 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2899 dev_err(&adapter->pdev->dev,
485bf569
SN
2900 "FW Image not properly aligned. "
2901 "Length must be 4 byte aligned.\n");
2902 status = -EINVAL;
2903 goto lancer_fw_exit;
d9efd2af
SB
2904 }
2905
485bf569
SN
2906 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2907 + LANCER_FW_DOWNLOAD_CHUNK;
2908 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2909 &flash_cmd.dma, GFP_KERNEL);
2910 if (!flash_cmd.va) {
2911 status = -ENOMEM;
2912 dev_err(&adapter->pdev->dev,
2913 "Memory allocation failure while flashing\n");
2914 goto lancer_fw_exit;
2915 }
84517482 2916
485bf569
SN
2917 dest_image_ptr = flash_cmd.va +
2918 sizeof(struct lancer_cmd_req_write_object);
2919 image_size = fw->size;
2920 data_ptr = fw->data;
2921
2922 while (image_size) {
2923 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2924
2925 /* Copy the image chunk content. */
2926 memcpy(dest_image_ptr, data_ptr, chunk_size);
2927
2928 status = lancer_cmd_write_object(adapter, &flash_cmd,
2929 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2930 &data_written, &add_status);
2931
2932 if (status)
2933 break;
2934
2935 offset += data_written;
2936 data_ptr += data_written;
2937 image_size -= data_written;
2938 }
2939
2940 if (!status) {
2941 /* Commit the FW written */
2942 status = lancer_cmd_write_object(adapter, &flash_cmd,
2943 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2944 &data_written, &add_status);
2945 }
2946
2947 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2948 flash_cmd.dma);
2949 if (status) {
2950 dev_err(&adapter->pdev->dev,
2951 "Firmware load error. "
2952 "Status code: 0x%x Additional Status: 0x%x\n",
2953 status, add_status);
2954 goto lancer_fw_exit;
2955 }
2956
2957 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2958lancer_fw_exit:
2959 return status;
2960}
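/* The chunking loop above advances by data_written rather than by the
 * requested chunk size, because the firmware reports how many bytes it
 * actually consumed. A minimal sketch of that contract, with write_chunk
 * as a hypothetical stand-in for lancer_cmd_write_object():
 */
static int be_download_in_chunks(const u8 *data, size_t size, u32 max_chunk,
				 int (*write_chunk)(const u8 *buf, u32 len,
						    u32 offset, u32 *written))
{
	u32 offset = 0, written = 0;
	int status = 0;

	while (size) {
		u32 chunk = min_t(size_t, size, max_chunk);

		status = write_chunk(data, chunk, offset, &written);
		if (status)
			break;

		offset += written;	/* device dictates the stride */
		data += written;
		size -= written;
	}
	return status;
}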
2961
 2962static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2963{
2964 struct flash_file_hdr_g2 *fhdr;
2965 struct flash_file_hdr_g3 *fhdr3;
2966 struct image_hdr *img_hdr_ptr = NULL;
2967 struct be_dma_mem flash_cmd;
2968 const u8 *p;
2969 int status = 0, i = 0, num_imgs = 0;
84517482
AK
2970
2971 p = fw->data;
3f0d4560 2972 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2973
84517482 2974 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2975 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2976 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2977 if (!flash_cmd.va) {
2978 status = -ENOMEM;
2979 dev_err(&adapter->pdev->dev,
2980 "Memory allocation failure while flashing\n");
485bf569 2981 goto be_fw_exit;
84517482
AK
2982 }
2983
3f0d4560
AK
2984 if ((adapter->generation == BE_GEN3) &&
2985 (get_ufigen_type(fhdr) == BE_GEN3)) {
2986 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2987 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2988 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2989 img_hdr_ptr = (struct image_hdr *) (fw->data +
2990 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2991 i * sizeof(struct image_hdr)));
2992 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2993 status = be_flash_data(adapter, fw, &flash_cmd,
2994 num_imgs);
3f0d4560
AK
2995 }
2996 } else if ((adapter->generation == BE_GEN2) &&
2997 (get_ufigen_type(fhdr) == BE_GEN2)) {
2998 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2999 } else {
3000 dev_err(&adapter->pdev->dev,
3001 "UFI and Interface are not compatible for flashing\n");
3002 status = -1;
84517482
AK
3003 }
3004
2b7bcebf
IV
3005 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3006 flash_cmd.dma);
84517482
AK
3007 if (status) {
3008 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3009 goto be_fw_exit;
84517482
AK
3010 }
3011
af901ca1 3012 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3013
485bf569
SN
3014be_fw_exit:
3015 return status;
3016}
3017
3018int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3019{
3020 const struct firmware *fw;
3021 int status;
3022
3023 if (!netif_running(adapter->netdev)) {
3024 dev_err(&adapter->pdev->dev,
3025 "Firmware load not allowed (interface is down)\n");
3026 return -1;
3027 }
3028
3029 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3030 if (status)
3031 goto fw_exit;
3032
3033 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3034
3035 if (lancer_chip(adapter))
3036 status = lancer_fw_download(adapter, fw);
3037 else
3038 status = be_fw_download(adapter, fw);
3039
84517482
AK
3040fw_exit:
3041 release_firmware(fw);
3042 return status;
3043}
3044
6b7c5b94
SP
3045static struct net_device_ops be_netdev_ops = {
3046 .ndo_open = be_open,
3047 .ndo_stop = be_close,
3048 .ndo_start_xmit = be_xmit,
a54769f5 3049 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3050 .ndo_set_mac_address = be_mac_addr_set,
3051 .ndo_change_mtu = be_change_mtu,
ab1594e9 3052 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3053 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3054 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3055 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3056 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3057 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3058 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
64600ea5 3059 .ndo_get_vf_config = be_get_vf_config
6b7c5b94
SP
3060};
3061
3062static void be_netdev_init(struct net_device *netdev)
3063{
3064 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda
SP
3065 struct be_rx_obj *rxo;
3066 int i;
6b7c5b94 3067
6332c8d3 3068 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
3069 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3070 NETIF_F_HW_VLAN_TX;
3071 if (be_multi_rxq(adapter))
3072 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3073
3074 netdev->features |= netdev->hw_features |
8b8ddc68 3075 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3076
eb8a50d9 3077 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3078 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3079
6b7c5b94
SP
3080 netdev->flags |= IFF_MULTICAST;
3081
c190e3c8
AK
3082 netif_set_gso_max_size(netdev, 65535);
3083
6b7c5b94
SP
3084 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3085
3086 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3087
3abcdeda
SP
3088 for_all_rx_queues(adapter, rxo, i)
3089 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3090 BE_NAPI_WEIGHT);
3091
5fb379ee 3092 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94 3093 BE_NAPI_WEIGHT);
6b7c5b94
SP
3094}
3095
3096static void be_unmap_pci_bars(struct be_adapter *adapter)
3097{
8788fdc2
SP
3098 if (adapter->csr)
3099 iounmap(adapter->csr);
3100 if (adapter->db)
3101 iounmap(adapter->db);
6b7c5b94
SP
3102}
3103
3104static int be_map_pci_bars(struct be_adapter *adapter)
3105{
3106 u8 __iomem *addr;
db3ea781 3107 int db_reg;
6b7c5b94 3108
fe6d2a38
SP
3109 if (lancer_chip(adapter)) {
3110 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3111 pci_resource_len(adapter->pdev, 0));
3112 if (addr == NULL)
3113 return -ENOMEM;
3114 adapter->db = addr;
3115 return 0;
3116 }
3117
ba343c77
SB
3118 if (be_physfn(adapter)) {
3119 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3120 pci_resource_len(adapter->pdev, 2));
3121 if (addr == NULL)
3122 return -ENOMEM;
3123 adapter->csr = addr;
3124 }
6b7c5b94 3125
ba343c77 3126 if (adapter->generation == BE_GEN2) {
ba343c77
SB
3127 db_reg = 4;
3128 } else {
ba343c77
SB
3129 if (be_physfn(adapter))
3130 db_reg = 4;
3131 else
3132 db_reg = 0;
3133 }
3134 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3135 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
3136 if (addr == NULL)
3137 goto pci_map_err;
ba343c77
SB
3138 adapter->db = addr;
3139
6b7c5b94
SP
3140 return 0;
3141pci_map_err:
3142 be_unmap_pci_bars(adapter);
3143 return -ENOMEM;
3144}
3145
3146
3147static void be_ctrl_cleanup(struct be_adapter *adapter)
3148{
8788fdc2 3149 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3150
3151 be_unmap_pci_bars(adapter);
3152
3153 if (mem->va)
2b7bcebf
IV
3154 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3155 mem->dma);
e7b909a6 3156
5b8821b7 3157 mem = &adapter->rx_filter;
e7b909a6 3158 if (mem->va)
2b7bcebf
IV
3159 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3160 mem->dma);
6b7c5b94
SP
3161}
3162
6b7c5b94
SP
3163static int be_ctrl_init(struct be_adapter *adapter)
3164{
8788fdc2
SP
3165 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3166 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3167 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3168 int status;
6b7c5b94
SP
3169
3170 status = be_map_pci_bars(adapter);
3171 if (status)
e7b909a6 3172 goto done;
6b7c5b94
SP
3173
3174 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3175 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3176 mbox_mem_alloc->size,
3177 &mbox_mem_alloc->dma,
3178 GFP_KERNEL);
6b7c5b94 3179 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3180 status = -ENOMEM;
3181 goto unmap_pci_bars;
6b7c5b94
SP
3182 }
3183 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3184 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3185 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3186 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3187
5b8821b7
SP
3188 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3189 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3190 &rx_filter->dma, GFP_KERNEL);
3191 if (rx_filter->va == NULL) {
e7b909a6
SP
3192 status = -ENOMEM;
3193 goto free_mbox;
3194 }
5b8821b7 3195 memset(rx_filter->va, 0, rx_filter->size);
e7b909a6 3196
2984961c 3197 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3198 spin_lock_init(&adapter->mcc_lock);
3199 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3200
dd131e76 3201 init_completion(&adapter->flash_compl);
cf588477 3202 pci_save_state(adapter->pdev);
6b7c5b94 3203 return 0;
e7b909a6
SP
3204
3205free_mbox:
2b7bcebf
IV
3206 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3207 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3208
3209unmap_pci_bars:
3210 be_unmap_pci_bars(adapter);
3211
3212done:
3213 return status;
6b7c5b94
SP
3214}
3215
3216static void be_stats_cleanup(struct be_adapter *adapter)
3217{
3abcdeda 3218 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3219
3220 if (cmd->va)
2b7bcebf
IV
3221 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3222 cmd->va, cmd->dma);
6b7c5b94
SP
3223}
3224
3225static int be_stats_init(struct be_adapter *adapter)
3226{
3abcdeda 3227 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3228
005d5696 3229 if (adapter->generation == BE_GEN2) {
89a88ab8 3230 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3231 } else {
3232 if (lancer_chip(adapter))
3233 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3234 else
3235 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3236 }
2b7bcebf
IV
3237 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3238 GFP_KERNEL);
6b7c5b94
SP
3239 if (cmd->va == NULL)
3240 return -1;
d291b9af 3241 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3242 return 0;
3243}
3244
3245static void __devexit be_remove(struct pci_dev *pdev)
3246{
3247 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3248
6b7c5b94
SP
3249 if (!adapter)
3250 return;
3251
f203af70
SK
3252 cancel_delayed_work_sync(&adapter->work);
3253
6b7c5b94
SP
3254 unregister_netdev(adapter->netdev);
3255
5fb379ee
SP
3256 be_clear(adapter);
3257
6b7c5b94
SP
3258 be_stats_cleanup(adapter);
3259
3260 be_ctrl_cleanup(adapter);
3261
ba343c77
SB
3262 be_sriov_disable(adapter);
3263
8d56ff11 3264 be_msix_disable(adapter);
6b7c5b94
SP
3265
3266 pci_set_drvdata(pdev, NULL);
3267 pci_release_regions(pdev);
3268 pci_disable_device(pdev);
3269
3270 free_netdev(adapter->netdev);
3271}
3272
2243e2e9 3273static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3274{
6b7c5b94
SP
3275 int status;
3276
3abcdeda
SP
3277 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3278 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3279 if (status)
3280 return status;
3281
752961a1 3282 if (adapter->function_mode & FLEX10_MODE)
82903e4b
AK
3283 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3284 else
3285 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3286
9e1453c5
AK
3287 status = be_cmd_get_cntl_attributes(adapter);
3288 if (status)
3289 return status;
3290
2243e2e9 3291 return 0;
6b7c5b94
SP
3292}
3293
fe6d2a38
SP
3294static int be_dev_family_check(struct be_adapter *adapter)
3295{
3296 struct pci_dev *pdev = adapter->pdev;
3297 u32 sli_intf = 0, if_type;
3298
3299 switch (pdev->device) {
3300 case BE_DEVICE_ID1:
3301 case OC_DEVICE_ID1:
3302 adapter->generation = BE_GEN2;
3303 break;
3304 case BE_DEVICE_ID2:
3305 case OC_DEVICE_ID2:
3306 adapter->generation = BE_GEN3;
3307 break;
3308 case OC_DEVICE_ID3:
12f4d0a8 3309 case OC_DEVICE_ID4:
fe6d2a38
SP
3310 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3311 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3312 SLI_INTF_IF_TYPE_SHIFT;
3313
3314 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3315 if_type != 0x02) {
3316 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3317 return -EINVAL;
3318 }
fe6d2a38
SP
3319 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3320 SLI_INTF_FAMILY_SHIFT);
3321 adapter->generation = BE_GEN3;
3322 break;
3323 default:
3324 adapter->generation = 0;
3325 }
3326 return 0;
3327}
3328
37eed1cb
PR
3329static int lancer_wait_ready(struct be_adapter *adapter)
3330{
3331#define SLIPORT_READY_TIMEOUT 500
3332 u32 sliport_status;
3333 int status = 0, i;
3334
3335 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3336 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3337 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3338 break;
3339
3340 msleep(20);
3341 }
3342
3343 if (i == SLIPORT_READY_TIMEOUT)
3344 status = -1;
3345
3346 return status;
3347}
3348
3349static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3350{
3351 int status;
3352 u32 sliport_status, err, reset_needed;
3353 status = lancer_wait_ready(adapter);
3354 if (!status) {
3355 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3356 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3357 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3358 if (err && reset_needed) {
3359 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3360 adapter->db + SLIPORT_CONTROL_OFFSET);
3361
 3362 /* check that the adapter has corrected the error */
3363 status = lancer_wait_ready(adapter);
3364 sliport_status = ioread32(adapter->db +
3365 SLIPORT_STATUS_OFFSET);
3366 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3367 SLIPORT_STATUS_RN_MASK);
3368 if (status || sliport_status)
3369 status = -1;
3370 } else if (err || reset_needed) {
3371 status = -1;
3372 }
3373 }
3374 return status;
3375}
3376
6b7c5b94
SP
3377static int __devinit be_probe(struct pci_dev *pdev,
3378 const struct pci_device_id *pdev_id)
3379{
3380 int status = 0;
3381 struct be_adapter *adapter;
3382 struct net_device *netdev;
6b7c5b94
SP
3383
3384 status = pci_enable_device(pdev);
3385 if (status)
3386 goto do_none;
3387
3388 status = pci_request_regions(pdev, DRV_NAME);
3389 if (status)
3390 goto disable_dev;
3391 pci_set_master(pdev);
3392
3c8def97 3393 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
6b7c5b94
SP
3394 if (netdev == NULL) {
3395 status = -ENOMEM;
3396 goto rel_reg;
3397 }
3398 adapter = netdev_priv(netdev);
3399 adapter->pdev = pdev;
3400 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3401
3402 status = be_dev_family_check(adapter);
63657b9c 3403 if (status)
fe6d2a38
SP
3404 goto free_netdev;
3405
6b7c5b94 3406 adapter->netdev = netdev;
2243e2e9 3407 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3408
2b7bcebf 3409 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3410 if (!status) {
3411 netdev->features |= NETIF_F_HIGHDMA;
3412 } else {
2b7bcebf 3413 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3414 if (status) {
3415 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3416 goto free_netdev;
3417 }
3418 }
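/* Standard 64-bit-first DMA policy: try a 64-bit mask and advertise
 * NETIF_F_HIGHDMA (the device can then DMA to/from high memory), else
 * fall back to a 32-bit mask, and fail the probe only if even that is
 * refused.
 */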
3419
f9449ab7
SP
3420 status = be_sriov_enable(adapter);
3421 if (status)
3422 goto free_netdev;
ba343c77 3423
6b7c5b94
SP
3424 status = be_ctrl_init(adapter);
3425 if (status)
f9449ab7 3426 goto disable_sriov;
6b7c5b94 3427
37eed1cb
PR
3428 if (lancer_chip(adapter)) {
3429 status = lancer_test_and_set_rdy_state(adapter);
3430 if (status) {
3431 dev_err(&pdev->dev, "Adapter is in a non-recoverable error state\n");
48f5a191 3432 goto ctrl_clean;
37eed1cb
PR
3433 }
3434 }
3435
2243e2e9 3436 /* sync up with fw's ready state */
ba343c77
SB
3437 if (be_physfn(adapter)) {
3438 status = be_cmd_POST(adapter);
3439 if (status)
3440 goto ctrl_clean;
ba343c77 3441 }
6b7c5b94 3442
2243e2e9
SP
3443 /* tell fw we're ready to fire cmds */
3444 status = be_cmd_fw_init(adapter);
6b7c5b94 3445 if (status)
2243e2e9
SP
3446 goto ctrl_clean;
3447
a4b4dfab
AK
3448 status = be_cmd_reset_function(adapter);
3449 if (status)
3450 goto ctrl_clean;
556ae191 3451
2243e2e9
SP
3452 status = be_stats_init(adapter);
3453 if (status)
3454 goto ctrl_clean;
3455
3456 status = be_get_config(adapter);
6b7c5b94
SP
3457 if (status)
3458 goto stats_clean;
6b7c5b94 3459
b9ab82c7
SP
3460 /* The INTR bit may be set in the card when probed by a kdump kernel
3461 * after a crash; clear it below so interrupt state starts out clean.
3462 */
3463 if (!lancer_chip(adapter))
3464 be_intr_set(adapter, false);
3465
3abcdeda
SP
3466 be_msix_enable(adapter);
3467
6b7c5b94 3468 INIT_DELAYED_WORK(&adapter->work, be_worker);
a54769f5 3469 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 3470
5fb379ee
SP
3471 status = be_setup(adapter);
3472 if (status)
3abcdeda 3473 goto msix_disable;
2243e2e9 3474
3abcdeda 3475 be_netdev_init(netdev);
6b7c5b94
SP
3476 status = register_netdev(netdev);
3477 if (status != 0)
5fb379ee 3478 goto unsetup;
6b7c5b94 3479
c4ca2374 3480 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
34b1ef04 3481
f203af70 3482 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3483 return 0;
3484
5fb379ee
SP
3485unsetup:
3486 be_clear(adapter);
3abcdeda
SP
3487msix_disable:
3488 be_msix_disable(adapter);
6b7c5b94
SP
3489stats_clean:
3490 be_stats_cleanup(adapter);
3491ctrl_clean:
3492 be_ctrl_cleanup(adapter);
f9449ab7 3493disable_sriov:
ba343c77 3494 be_sriov_disable(adapter);
f9449ab7 3495free_netdev:
fe6d2a38 3496 free_netdev(netdev);
8d56ff11 3497 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3498rel_reg:
3499 pci_release_regions(pdev);
3500disable_dev:
3501 pci_disable_device(pdev);
3502do_none:
c4ca2374 3503 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3504 return status;
3505}
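/* Note on the unwind labels above: each goto target undoes exactly the
 * steps completed before the failure, in reverse order of setup
 * (be_clear, then msix_disable, stats_clean, ctrl_clean, disable_sriov,
 * free_netdev, rel_reg, disable_dev), so a failure at any stage releases
 * only what was actually acquired.
 */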
3506
3507static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3508{
3509 struct be_adapter *adapter = pci_get_drvdata(pdev);
3510 struct net_device *netdev = adapter->netdev;
3511
a4ca055f 3512 cancel_delayed_work_sync(&adapter->work);
71d8d1b5
AK
3513 if (adapter->wol)
3514 be_setup_wol(adapter, true);
3515
6b7c5b94
SP
3516 netif_device_detach(netdev);
3517 if (netif_running(netdev)) {
3518 rtnl_lock();
3519 be_close(netdev);
3520 rtnl_unlock();
3521 }
9b0365f1 3522 be_clear(adapter);
6b7c5b94 3523
a4ca055f 3524 be_msix_disable(adapter);
6b7c5b94
SP
3525 pci_save_state(pdev);
3526 pci_disable_device(pdev);
3527 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3528 return 0;
3529}
3530
3531static int be_resume(struct pci_dev *pdev)
3532{
3533 int status = 0;
3534 struct be_adapter *adapter = pci_get_drvdata(pdev);
3535 struct net_device *netdev = adapter->netdev;
3536
3537 netif_device_detach(netdev);
3538
3539 status = pci_enable_device(pdev);
3540 if (status)
3541 return status;
3542
3543 pci_set_power_state(pdev, PCI_D0);
3544 pci_restore_state(pdev);
3545
a4ca055f 3546 be_msix_enable(adapter);
2243e2e9
SP
3547 /* tell fw we're ready to fire cmds */
3548 status = be_cmd_fw_init(adapter);
3549 if (status)
3550 return status;
3551
9b0365f1 3552 be_setup(adapter);
6b7c5b94
SP
3553 if (netif_running(netdev)) {
3554 rtnl_lock();
3555 be_open(netdev);
3556 rtnl_unlock();
3557 }
3558 netif_device_attach(netdev);
71d8d1b5
AK
3559
3560 if (adapter->wol)
3561 be_setup_wol(adapter, false);
a4ca055f
AK
3562
3563 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3564 return 0;
3565}
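/* be_resume() mirrors be_suspend() in reverse: re-enable the PCI device,
 * restore D0 and the saved config space, re-enable MSI-x, and only then
 * issue be_cmd_fw_init(); the firmware must be told the driver is ready
 * to fire commands before be_setup() sends any.
 */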
3566
82456b03
SP
3567/*
3568 * An FLR (Function Level Reset) will stop BE from DMAing any data.
3569 */
3570static void be_shutdown(struct pci_dev *pdev)
3571{
3572 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3573
2d5d4154
AK
3574 if (!adapter)
3575 return;
82456b03 3576
0f4a6828 3577 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3578
2d5d4154 3579 netif_device_detach(adapter->netdev);
82456b03 3580
82456b03
SP
3581 if (adapter->wol)
3582 be_setup_wol(adapter, true);
3583
57841869
AK
3584 be_cmd_reset_function(adapter);
3585
82456b03 3586 pci_disable_device(pdev);
82456b03
SP
3587}
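/* Shutdown ordering above: cancel the worker first so no deferred work
 * races the teardown, detach the netdev, arm WoL if configured, then
 * reset the function (the FLR noted above) so the hardware stops DMAing
 * before the PCI device is disabled.
 */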
3588
cf588477
SP
3589static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3590 pci_channel_state_t state)
3591{
3592 struct be_adapter *adapter = pci_get_drvdata(pdev);
3593 struct net_device *netdev = adapter->netdev;
3594
3595 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3596
3597 adapter->eeh_err = true;
3598
3599 netif_device_detach(netdev);
3600
3601 if (netif_running(netdev)) {
3602 rtnl_lock();
3603 be_close(netdev);
3604 rtnl_unlock();
3605 }
3606 be_clear(adapter);
3607
3608 if (state == pci_channel_io_perm_failure)
3609 return PCI_ERS_RESULT_DISCONNECT;
3610
3611 pci_disable_device(pdev);
3612
3613 return PCI_ERS_RESULT_NEED_RESET;
3614}
3615
3616static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3617{
3618 struct be_adapter *adapter = pci_get_drvdata(pdev);
3619 int status;
3620
3621 dev_info(&adapter->pdev->dev, "EEH reset\n");
3622 adapter->eeh_err = false;
6589ade0
SP
3623 adapter->ue_detected = false;
3624 adapter->fw_timeout = false;
cf588477
SP
3625
3626 status = pci_enable_device(pdev);
3627 if (status)
3628 return PCI_ERS_RESULT_DISCONNECT;
3629
3630 pci_set_master(pdev);
3631 pci_set_power_state(pdev, PCI_D0);
3632 pci_restore_state(pdev);
3633
3634 /* Check if card is ok and fw is ready */
3635 status = be_cmd_POST(adapter);
3636 if (status)
3637 return PCI_ERS_RESULT_DISCONNECT;
3638
3639 return PCI_ERS_RESULT_RECOVERED;
3640}
3641
3642static void be_eeh_resume(struct pci_dev *pdev)
3643{
3644 int status = 0;
3645 struct be_adapter *adapter = pci_get_drvdata(pdev);
3646 struct net_device *netdev = adapter->netdev;
3647
3648 dev_info(&adapter->pdev->dev, "EEH resume\n");
3649
3650 pci_save_state(pdev);
3651
3652 /* tell fw we're ready to fire cmds */
3653 status = be_cmd_fw_init(adapter);
3654 if (status)
3655 goto err;
3656
3657 status = be_setup(adapter);
3658 if (status)
3659 goto err;
3660
3661 if (netif_running(netdev)) {
3662 status = be_open(netdev);
3663 if (status)
3664 goto err;
3665 }
3666 netif_device_attach(netdev);
3667 return;
3668err:
3669 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3670}
3671
3672static struct pci_error_handlers be_eeh_handlers = {
3673 .error_detected = be_eeh_err_detected,
3674 .slot_reset = be_eeh_reset,
3675 .resume = be_eeh_resume,
3676};
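/* EEH flow implemented by these handlers: error_detected tears the driver
 * down and returns NEED_RESET (or DISCONNECT on a permanent failure);
 * slot_reset re-enables the device and re-runs POST, returning RECOVERED
 * only if the card comes back; resume then re-inits firmware cmds, runs
 * be_setup() and reattaches the netdev.
 */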
3677
6b7c5b94
SP
3678static struct pci_driver be_driver = {
3679 .name = DRV_NAME,
3680 .id_table = be_dev_ids,
3681 .probe = be_probe,
3682 .remove = be_remove,
3683 .suspend = be_suspend,
cf588477 3684 .resume = be_resume,
82456b03 3685 .shutdown = be_shutdown,
cf588477 3686 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3687};
3688
3689static int __init be_init_module(void)
3690{
8e95a202
JP
3691 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3692 rx_frag_size != 2048) {
6b7c5b94
SP
3693 printk(KERN_WARNING DRV_NAME
3694 " : Module param rx_frag_size must be 2048/4096/8192."
3695 " Using 2048\n");
3696 rx_frag_size = 2048;
3697 }
6b7c5b94
SP
3698
3699 return pci_register_driver(&be_driver);
3700}
3701module_init(be_init_module);
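/* Usage sketch (assuming the module is built as be2net, per DRV_NAME):
 *
 *   modprobe be2net rx_frag_size=4096
 *
 * Any value other than 2048/4096/8192 fails the check above; a warning
 * is logged and the 2048 default is used instead.
 */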
3702
3703static void __exit be_exit_module(void)
3704{
3705 pci_unregister_driver(&be_driver);
3706}
3707module_exit(be_exit_module);