drivers/net/ethernet/emulex/benet/be_main.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

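/* Ring helpers: each be_queue_info is backed by a single DMA-coherent
 * buffer of len * entry_size bytes, so teardown is one dma_free_coherent()
 * call covering the whole ring.
 */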
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

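/* Enable or disable host interrupt delivery by flipping the HOSTINTR bit
 * in the MEMBAR control register, reached through PCI config space. The
 * current setting is read first so the write is skipped when nothing
 * would change (and entirely when an EEH error has been flagged).
 */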
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

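/* Doorbell helpers: each notify builds a 32-bit doorbell word (ring id in
 * the low bits, posted/popped count and arm flags above it) and writes it
 * to the queue's offset in the doorbell BAR. The wmb() before the RQ/TXQ
 * doorbells makes the descriptors visible in memory before the hardware
 * is told about them.
 */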
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

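/* The MAC is changed add-before-delete: the new pmac is programmed first
 * and the old pmac_id is deleted only after the add succeeds, so a failure
 * along the way never leaves the interface without a valid address.
 */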
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

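/* The populate_*_stats() helpers below copy the firmware stats snapshot
 * (v0 layout on BE2, v1 on BE3, pport stats on Lancer) into the common
 * driver-format struct be_drv_stats, converting from LE as needed.
 */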
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

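/* The erx rx_drops_no_fragments counter (see be_parse_stats() below) is
 * only 16 bits wide in hardware. accumulate_16bit_val() widens it: the
 * low half of *acc mirrors the last hardware reading and the high half
 * counts wraps. E.g. with *acc at 0x0001fff0 a new reading of 0x0005 is
 * detected as a wrap (5 < 0xfff0) and the accumulator becomes 0x00020005.
 */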
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

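/* A transmit consumes one WRB per buffer (the skb head, if any, plus one
 * per page frag) and one extra header WRB. On BE2/BE3 the total must come
 * out even, so an unused "dummy" WRB is appended when the count would
 * otherwise be odd; Lancer has no such restriction.
 */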
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

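/* make_tx_wrbs() DMA-maps the skb head and each page frag, filling one WRB
 * per mapping behind the reserved header WRB. On a mapping failure the
 * queue head is rewound to map_head and every WRB mapped so far is
 * unmapped, so a failed transmit leaves the TX ring untouched.
 */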
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround, disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

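/* SR-IOV ndo handlers: these run in the PF on behalf of a VF, so each one
 * first checks that SR-IOV is enabled and that the VF index is in range
 * before issuing the FW command against the VF's config (domain vf + 1).
 */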
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

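/* Adaptive interrupt coalescing: once a second the RX packet rate is
 * sampled and a new EQ delay derived from it (roughly rx_pps / 110000,
 * scaled by 8), clamped to [min_eqd, max_eqd], then programmed via
 * be_cmd_modify_eqd() only when it actually changes.
 */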
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

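/* RX buffers are fragments carved out of larger "big" pages, so the page
 * backing a frag is DMA-unmapped only when the frag flagged as
 * last_page_user is reaped; the callers drop the per-frag page references.
 */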
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

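/* Two RX completion layouts exist: v1 on be3_native adapters and v0
 * otherwise (see be_rx_compl_get() below). Both parsers extract the same
 * fields into the hw-independent struct be_rx_compl_info.
 */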
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

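/* TX completions are reaped the same way as RX ones: peek at the valid
 * bit, read the rest of the entry only after the rmb(), then clear the
 * valid bit so the slot is recognized as new the next time around.
 */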
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
			      struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

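/* TX completion queues are created (and bound to an EQ) here, before
 * be_tx_qs_create() creates the WRB queues that post to them;
 * be_cmd_txq_create() needs the CQ to already exist.
 */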
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			 "Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx only one EQ is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

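/* Reaps up to @budget RX completions from the RXQ's CQ, handing frames to
 * GRO or the regular receive path, and replenishes the RX ring with fresh
 * fragments once it drops below the refill watermark.
 */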
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

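/* Reaps up to @budget TX completions, frees the associated wrbs/skbs and
 * re-wakes the netdev sub-queue when enough ring space has been recovered.
 * Returns true when the CQ was fully drained within the budget.
 */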
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

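/* NAPI poll handler: one instance runs per EQ and services every TX and RX
 * queue mapped to that EQ (queue i belongs to EQ i % num_evt_qs), plus MCC
 * completions on the EQ that hosts the MCC queue.
 */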
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}

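/* Checks for unrecoverable hw errors: the SLIPORT status/error registers on
 * Lancer, or the masked UE status words in PCI config space on BE2/BE3, and
 * logs which functional block (see ue_status_{low,hi}_desc) raised the error.
 */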
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    adapter->num_vfs == 0 && be_physfn(adapter) &&
	    !be_is_mc(adapter))
		return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	else
		return 0;
}

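/* Requests one MSI-X vector per RSS ring wanted (capped at the number of
 * online CPUs), with a minimum of one vector for the default RX queue; falls
 * back to whatever smaller count pci_enable_msix() says is available.
 */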
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				 "Device supports %d VFs and not %d\n",
				 adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_qs_destroy(adapter);
	return 0;
}

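/* Creates the RXQs and, when more than one RX ring exists, programs the
 * 128-entry RSS indirection table by striping the rss_ids of all RSS rings
 * across it (the default RXQ is created first but takes no RSS traffic).
 */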
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}

static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					  &pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id, 0);
	}
do_none:
	return status;
}

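/* One-shot bring-up of the function: enable MSI-X, create EQs, CQs, MCC and
 * TX/RX queues, create the interface with its MAC filters, then apply vlan,
 * rx-mode and flow-control config, and finally set up SR-IOV VFs if enabled.
 * Any failure unwinds everything via be_clear().
 */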
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  netdev->dev_addr, &adapter->if_handle,
				  &adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configured by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_add_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: It is legal for this cmd to fail on VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
						 adapter->rx_fc);
		/* For Lancer: It is legal for this cmd to fail on VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (sriov_enabled(adapter)) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

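/* Walks the per-generation flash component table and writes each section of
 * the UFI image to its flash region, 32KB at a time. Redboot is flashed only
 * when its CRC differs, and PHY fw only when a flashable PHY is present.
 */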
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

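/* Lancer fw download: the image is streamed to the "/prg" object in 32KB
 * chunks via lancer_cmd_write_object(), then committed with a zero-length
 * final write.
 */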
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

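/* BAR layout differs per family: Lancer exposes only a door-bell BAR 0;
 * BE2/BE3 PFs also map the CSR BAR 2, and the door-bell lives in BAR 4
 * (BAR 0 for BE3 VFs).
 */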
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				       pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				       pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}


static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

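/* Maps the PCI BARs and allocates the DMA memory used by the mailbox and
 * rx-filter cmds. The mbox buffer is over-allocated by 16 bytes so the
 * portion actually handed to hw can be aligned to a 16-byte boundary.
 */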
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

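/* If the SLIPORT reports an error, try a full function-level recovery:
 * reset the port, tear the function down with be_close()/be_clear() and
 * bring it back up via be_setup()/be_open() while detached from the stack.
 */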
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

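/* Periodic (1s) housekeeping: Lancer error recovery, UE detection, stats
 * refresh, replenishing starved RX rings and adaptive EQ-delay updates.
 */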
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

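/* PCI probe: take the function from power-on to a registered netdev.
 * The ordering below matters: BARs and the mailbox (be_ctrl_init) must be
 * up before any firmware command can be issued, and POST/fw_init/reset
 * happen before queue setup; register_netdev() comes last, presumably so
 * the netdev is never visible half-initialised.
 */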
static int __devinit be_probe(struct pci_dev *pdev,
			      const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

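	/* Prefer a 64-bit DMA mask and advertise NETIF_F_HIGHDMA with it;
	 * fall back to a 32-bit mask on platforms that cannot provide one.
	 */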
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		 adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

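/* Error unwind: the labels run in reverse order of setup, so each step
 * tears down only what was successfully initialised above it.
 */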
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

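/* Legacy PM suspend: stop the worker, optionally arm wake-on-LAN, close
 * the interface and release all queues before powering the device down.
 */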
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

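/* Legacy PM resume: re-enable the device, re-sync with firmware
 * (be_cmd_fw_init), rebuild the queues and restart the worker.
 */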
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

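/* PCI error recovery (EEH) callbacks: the platform detaches the device on
 * a channel failure (error_detected), resets the slot (slot_reset) and
 * then re-attaches it (resume).
 */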
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

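/* Slot reset: re-enable the freshly reset function, restore PCI state and
 * run POST to confirm the firmware is ready before reporting recovery.
 */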
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

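/* Resume after a successful slot reset: re-sync with firmware, rebuild
 * the queues and bring the interface back up if it was running.
 */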
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

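/* Module entry point: validate the rx_frag_size parameter (only
 * 2048/4096/8192-byte RX fragments are supported, per the check below)
 * before registering the PCI driver.
 */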
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);