be2net: fix ethtool ringparam reporting
drivers/net/ethernet/emulex/benet/be_main.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
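
/* The two tables that follow give human-readable names for the bits of the
 * UE (Unrecoverable Error) status low/high CSRs; the error-detection path
 * (not shown in this section) indexes them by bit position when logging
 * which hardware block failed.
 */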
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

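/* be_queue_alloc()/be_queue_free() below manage the DMA-coherent memory
 * backing a hardware ring: len entries of entry_size bytes, zeroed on
 * allocation. Minimal usage sketch (error handling elided; this mirrors
 * how the MCC queues are set up later in this file):
 *
 *	struct be_queue_info q;
 *
 *	if (be_queue_alloc(adapter, &q, MCC_Q_LEN,
 *			   sizeof(struct be_mcc_wrb)))
 *		return -1;
 *	... ask FW to create the queue, use it ...
 *	be_queue_free(adapter, &q);
 */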
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

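/* be_intr_set() gates host interrupts for the whole function by flipping
 * the host-interrupt enable bit in the MEMBAR control register, reached
 * through PCI config space. It reads the current value first and writes
 * back only on an actual enable->disable or disable->enable transition.
 */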
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

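/* The four *_notify() helpers below ring hardware doorbells. Each doorbell
 * word packs the queue id into the low bits and a count (buffers posted or
 * completions processed) into a shifted field; EQ/CQ doorbells also carry
 * re-arm/clear flags. The wmb() before the iowrite32() ensures queue-entry
 * writes in memory are visible to the device before the doorbell fires.
 */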
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

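/* MAC address change is done make-before-break: query the MAC currently
 * programmed on the interface, add the new one as an extra pmac entry, and
 * only then delete the old pmac id. If the new address equals the current
 * one, no FW commands beyond the query are issued.
 */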
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

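/* accumulate_16bit_val() folds a free-running 16-bit HW counter into a
 * 32-bit software accumulator: the low 16 bits track the HW value and the
 * high 16 bits count wrap-arounds. Worked example (values illustrative):
 * if *acc == 0x0001FFF0 and the HW counter now reads val == 0x0005, the
 * counter wrapped (0x0005 < 0xFFF0), so the new value is
 * 0x00010000 + 0x0005 + 65536 = 0x00020005.
 */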
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

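/* be_get_stats64() sums per-queue counters into the rtnl stats. The
 * u64_stats_fetch_begin_bh()/retry_bh() loop re-reads a queue's packet and
 * byte counts until it gets a snapshot that no concurrent writer tore,
 * which matters on 32-bit hosts where 64-bit counter updates are not
 * atomic.
 */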
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

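/* WRB budget for a TX skb: one WRB per fragment, plus one if there is
 * linear (header) data, plus one header WRB; on BE2/BE3 an odd total gets
 * one dummy WRB so the count stays even (Lancer has no such restriction).
 * Example (illustrative): an skb with linear data and 2 frags needs
 * 1 + 2 + 1(hdr) = 4 WRBs and no dummy; with 3 frags it needs 5 and gets
 * a dummy, for 6.
 */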
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

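/* make_tx_wrbs() DMA-maps the skb head and each page fragment, filling one
 * WRB per mapping behind the header WRB it reserves up front. On a mapping
 * failure it rewinds txq->head to map_head and walks the WRBs it already
 * filled, unmapping each one, then returns 0 so the caller drops the skb.
 */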
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

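/* TX fast path: be_xmit() sizes the WRB budget, builds the WRBs, records
 * the skb for completion-time freeing, stops the subqueue if the ring
 * cannot hold another max-sized request, and only then rings the TX
 * doorbell. Stopping before the doorbell avoids racing the completion that
 * would re-wake the queue.
 */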
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);

	return 0;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);

	return 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

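/* Adaptive EQ-delay update: once a second, compute the RX packet rate and
 * derive a new interrupt-coalescing delay as eqd = (pps / 110000) * 8,
 * clamped to [min_eqd, max_eqd] and forced to 0 below 10. Example
 * (illustrative): at ~440k pkts/s, eqd = 4 * 8 = 32; at 100k pkts/s the
 * raw value is 0, so coalescing is effectively off.
 */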
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

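/* skb_fill_rx_data() rebuilds a frame from the posted page fragments: up
 * to BE_HDR_LEN bytes are copied into the skb's linear area (a tiny frame
 * is copied whole and its page released); the remainder is attached as
 * page frags, and consecutive frags that share a physical page are
 * coalesced into a single slot.
 */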
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

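/* The RX completion comes in two on-the-wire layouts: v1 when the adapter
 * runs in BE3-native mode, v0 otherwise. The two parsers below extract the
 * same fields into the layout-independent struct be_rx_compl_info that the
 * rest of the RX path consumes.
 */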
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

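/* RX buffer posting carves each big page into rx_frag_size chunks: the
 * first chunk maps the page for DMA, later chunks just get_page() an extra
 * reference, and the chunk that exhausts the page is marked last_page_user
 * so the completion path knows when to dma_unmap_page(). Example
 * (illustrative): with an 8192-byte big page and rx_frag_size == 2048,
 * one page backs four receive descriptors.
 */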
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

3c8def97
SP
1560static void be_tx_compl_clean(struct be_adapter *adapter,
1561 struct be_tx_obj *txo)
6b7c5b94 1562{
3c8def97
SP
1563 struct be_queue_info *tx_cq = &txo->cq;
1564 struct be_queue_info *txq = &txo->q;
a8e9179a 1565 struct be_eth_tx_compl *txcp;
4d586b82 1566 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
3c8def97 1567 struct sk_buff **sent_skbs = txo->sent_skb_list;
b03388d6
SP
1568 struct sk_buff *sent_skb;
1569 bool dummy_wrb;
a8e9179a
SP
1570
1571 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1572 do {
1573 while ((txcp = be_tx_compl_get(tx_cq))) {
1574 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1575 wrb_index, txcp);
3c8def97 1576 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
a8e9179a
SP
1577 cmpl++;
1578 }
1579 if (cmpl) {
1580 be_cq_notify(adapter, tx_cq->id, false, cmpl);
4d586b82 1581 atomic_sub(num_wrbs, &txq->used);
a8e9179a 1582 cmpl = 0;
4d586b82 1583 num_wrbs = 0;
a8e9179a
SP
1584 }
1585
1586 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1587 break;
1588
1589 mdelay(1);
1590 } while (true);
1591
1592 if (atomic_read(&txq->used))
1593 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1594 atomic_read(&txq->used));
b03388d6
SP
1595
1596 /* free posted tx for which compls will never arrive */
1597 while (atomic_read(&txq->used)) {
1598 sent_skb = sent_skbs[txq->tail];
1599 end_idx = txq->tail;
1600 index_adv(&end_idx,
fe6d2a38
SP
1601 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1602 txq->len);
3c8def97 1603 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
4d586b82 1604 atomic_sub(num_wrbs, &txq->used);
b03388d6 1605 }
6b7c5b94
SP
1606}
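
/* A sketch of the ring-index arithmetic relied on above (illustrative
 * only): index_inc()/index_adv() are assumed to advance an index with
 * wrap-around, which is also what lets be_rx_q_clean() recover the tail
 * from head/len/used.
 */
#if 0
static inline void example_index_adv(u16 *index, u16 adv, u16 len)
{
	*index = (*index + adv) % len;	/* wrap at queue length */
}
#endif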

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
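
/* Call-order sketch (illustrative only; be_setup() below does this for
 * real): since the MCC CQ shares the TX EQ, TX queues must exist before
 * the MCC queues are created.
 */
#if 0
static int example_queue_bringup(struct be_adapter *adapter)
{
	int status;

	status = be_tx_queues_create(adapter);	/* TX EQ created here */
	if (status)
		return status;
	return be_mcc_queues_create(adapter);	/* shares that TX EQ */
}
#endif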

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
		be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && be_physfn(adapter) &&
		!be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
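
/* NAPI-contract sketch (illustrative only): a poll routine may consume
 * at most 'budget' completions, and only when it finds fewer than
 * 'budget' may it complete NAPI and re-arm interrupts, exactly as
 * be_poll_rx() does with the CQ above.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	work_done = example_consume(napi, budget);	/* hypothetical helper */
	if (work_done < budget) {
		napi_complete(napi);
		example_rearm(napi);			/* hypothetical helper */
	}
	return work_done;
}
#endif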

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
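
/* Bit-walk sketch (illustrative only): the UE loops above shift the
 * status word right one bit per iteration and print the matching
 * description string for every set bit.
 */
#if 0
static void example_decode_status(u32 status, const char * const *desc)
{
	u32 i;

	for (i = 0; status; status >>= 1, i++)
		if (status & 1)
			pr_err("bit %u: %s set\n", i, desc[i]);
}
#endif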

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
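
/* Retry-convention sketch (illustrative only): with the older
 * pci_enable_msix() API used here, a positive return value is the number
 * of vectors the platform can actually grant, so the request is simply
 * retried with that smaller count (the driver additionally insists on
 * BE_MIN_MSIX_VECTORS).
 */
#if 0
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int want)
{
	int ret = pci_enable_msix(pdev, entries, want);

	if (ret > 0)			/* only 'ret' vectors available */
		ret = pci_enable_msix(pdev, entries, ret);
	return ret;			/* 0 on success, < 0 on failure */
}
#endif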

static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);

		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
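
/* Indirection-table sketch (illustrative only): the nested loops above
 * fill all 128 RSS table slots by round-robining the rss_ids of the
 * RSS-capable rings; a flat equivalent of the same fill is shown here.
 */
#if 0
static void example_fill_rsstable(u8 *table, int table_len,
				  const u8 *rss_ids, int num_rss)
{
	int i;

	for (i = 0; i < table_len; i++)
		table[i] = rss_ids[i % num_rss];	/* round robin */
}
#endif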

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

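/* Seeding sketch (illustrative only): VF n receives the seed MAC with
 * its last octet bumped n times; no carry into the higher octets is
 * attempted, which is adequate for the small num_vfs values involved.
 */
#if 0
static void example_vf_mac(u8 *mac, const u8 *seed, u32 vf_num)
{
	memcpy(mac, seed, ETH_ALEN);
	mac[5] += vf_num;	/* wraps within the last octet only */
}
#endif
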
static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
				vf + 1);
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	int vf;

	for (vf = 0; vf < num_vfs; vf++) {
		adapter->vf_cfg[vf].vf_if_handle = -1;
		adapter->vf_cfg[vf].vf_pmac_id = -1;
	}
}

static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK,
			false, adapter->if_handle, pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id, 0);
do_none:
	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status, i;
	u8 mac[ETH_ALEN];
	struct be_tx_obj *txo;

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	for_all_tx_queues(adapter, txo, i) {
		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			goto err;
	}

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configured by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_configure_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: It is legal for this cmd to fail on VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		/* For Lancer: It is legal for this cmd to fail on VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rxo, i)
		event_handle(adapter, &rxo->rx_eq, true);
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
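
/* CRC-skip sketch (illustrative only): the last four bytes of the image
 * region hold its CRC, so comparing the CRC already in flash with the
 * one embedded in the new image decides whether reflashing redboot can
 * be skipped entirely.
 */
#if 0
static bool example_should_flash(const u8 *new_crc, const u8 *flashed_crc)
{
	return memcmp(flashed_crc, new_crc, 4) != 0;	/* differ => flash */
}
#endif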

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
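
/* Chunking sketch (illustrative only): each component is written in 32KB
 * pieces; the final piece switches from the SAVE opcode to the FLASH
 * (commit) opcode, which is what tells the firmware the image is
 * complete.
 */
#if 0
static void example_chunk_ops(u32 total)
{
	u32 num, op;

	while (total) {
		num = total > 32 * 1024 ? 32 * 1024 : total;
		total -= num;
		op = total ? FLASHROM_OPER_SAVE : FLASHROM_OPER_FLASH;
		pr_info("write %u bytes with op %u\n", num, op);
	}
}
#endif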

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}


static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
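
/* Alignment sketch (illustrative only): the hardware mailbox must be
 * 16-byte aligned, so 16 extra bytes are over-allocated and both the
 * CPU and DMA addresses are rounded up with PTR_ALIGN, as done above;
 * 'raw' here is a hypothetical name for the +16 allocation.
 */
#if 0
	mem->va  = PTR_ALIGN(raw->va, 16);
	mem->dma = PTR_ALIGN(raw->dma, 16);
#endif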

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
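
/* Field-extraction sketch (illustrative only): the SLI_INTF decoding
 * above is the usual mask-then-shift idiom for pulling a bit-field out
 * of a 32-bit register.
 */
#if 0
static inline u32 example_get_field(u32 reg, u32 mask, u32 shift)
{
	return (reg & mask) >> shift;
}
#endif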

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
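
/* Bounded-poll sketch (illustrative only): lancer_wait_ready() is the
 * standard poll-with-timeout loop; the post-loop index test is what
 * distinguishes "became ready" from "gave up".
 */
#if 0
static int example_wait(struct be_adapter *adapter, int max_secs)
{
	int i;

	for (i = 0; i < max_secs; i++) {
		if (ioread32(adapter->db + SLIPORT_STATUS_OFFSET) &
		    SLIPORT_STATUS_RDY_MASK)
			return 0;
		msleep(1000);
	}
	return -1;		/* loop ran to completion: timed out */
}
#endif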
3386
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

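/* Housekeeping worker, rescheduled every second: runs the Lancer error
 * check and UE detection, reaps pending MCC completions while the
 * interface is down, fires the async stats command, updates RX EQ
 * delays and replenishes any starved RX queues.
 */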
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

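/* PCI probe: brings the function from reset to a registered netdev.
 * The ordering matters: the control structures and mailbox must be
 * ready (be_ctrl_init) before any fw command is issued, and the fw
 * must ack POST/fw_init before the function is reset and configured.
 */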
static int __devinit be_probe(struct pci_dev *pdev,
			      const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

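/* PM suspend: quiesce the worker, optionally arm WoL, close the
 * interface and tear down fw resources before powering down.
 */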
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

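/* PM resume: mirror image of be_suspend(); re-enable the device,
 * re-init the fw, rebuild resources with be_setup() and reattach.
 */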
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

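/* PCI/EEH error handlers: detach and tear down on error, re-POST the
 * card on slot reset, and rebuild the function on resume.
 */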
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

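/* Slot reset: re-enable the device and verify via POST that the card
 * and firmware are ok before declaring the slot recovered.
 */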
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

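/* EEH resume: the function was reset during error recovery, so the fw
 * must be re-initialized and the rings rebuilt before reattaching.
 */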
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

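/* Module entry: validate the rx_frag_size parameter (only 2048, 4096
 * and 8192 byte fragments are supported) before registering the PCI
 * driver.
 */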
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);