be2net: Fix number of vlan slots in flex mode
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

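/* Enable or disable the host interrupt bit in the PCICFG membar;
 * does nothing once an EEH error has been flagged on the function.
 */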
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

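/* The *_notify() helpers below ring the adapter's doorbell registers:
 * the queue id goes in the low bits and the posted/popped count (plus
 * arm/clear flags for EQs and CQs) in the upper bits. The wmb() orders
 * descriptor updates before the doorbell write.
 */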
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

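/* Change the primary MAC by adding the new address as a pmac first and
 * deleting the old pmac only on success, so the interface always keeps
 * a valid filter.
 */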
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

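/* Copy the v0-format (BE2) h/w stats into the driver's generic
 * be_drv_stats, byte-swapping the DMA'ed block first; the v1 variant
 * below does the same for BE3.
 */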
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

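/* Fold a 16-bit h/w counter that wraps at 65535 into a 32-bit s/w
 * accumulator: the low 16 bits mirror the h/w value, the high 16 bits
 * count observed wrap-arounds.
 */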
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

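/* Aggregate the per-queue rx/tx packet and byte counters under their
 * u64_stats sync points and fold the driver-level error counters into
 * the standard rtnl_link_stats64 fields.
 */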
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

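/* Fill the per-packet header WRB: LSO and checksum-offload flags, the
 * (possibly rewritten) vlan tag, the WRB count and total frame length.
 */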
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

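/* DMA-map the skb head and each frag and fill one data WRB per mapped
 * piece (plus the header WRB and an optional dummy WRB). On a mapping
 * failure everything mapped so far is unwound and 0 is returned.
 */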
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

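/* Sync the h/w rx filter with netdev state: promiscuous and allmulti
 * flags, the multicast list, and the unicast MAC list (slot 0 of
 * pmac_id[] is reserved for the primary MAC).
 */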
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

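/* Re-tune the EQ's interrupt delay once a second from the measured rx
 * packet rate (adaptive interrupt coalescing); the fixed eqd is used
 * when AIC is disabled for the EQ.
 */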
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

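/* Return the page_info for the given rx frag index; the backing page is
 * DMA-unmapped when its last fragment is being consumed.
 */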
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

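/* Extract the fields of a v1-format (BE3-native) rx completion into the
 * chip-independent be_rx_compl_info; the v0 variant below does the same
 * for the older completion layout.
 */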
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

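/* Pop the next valid tx completion, converting it to CPU byte order and
 * clearing its valid bit before advancing the CQ tail.
 */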
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

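/* Wait up to 200ms for all pending tx completions to arrive, then
 * forcibly unmap and free any posted skbs whose completions never came.
 */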
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

10ef9ab4
SP
1650static void be_evt_queues_destroy(struct be_adapter *adapter)
1651{
1652 struct be_eq_obj *eqo;
1653 int i;
1654
1655 for_all_evt_queues(adapter, eqo, i) {
1656 be_eq_clean(eqo);
1657 if (eqo->q.created)
1658 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1659 be_queue_free(adapter, &eqo->q);
1660 }
1661}
1662
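/* Creates one EQ per interrupt vector in use; adaptive interrupt
 * coalescing (AIC) is enabled on each EQ by default.
 */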
1663static int be_evt_queues_create(struct be_adapter *adapter)
1664{
1665 struct be_queue_info *eq;
1666 struct be_eq_obj *eqo;
1667 int i, rc;
1668
1669 adapter->num_evt_qs = num_irqs(adapter);
1670
1671 for_all_evt_queues(adapter, eqo, i) {
1672 eqo->adapter = adapter;
1673 eqo->tx_budget = BE_TX_BUDGET;
1674 eqo->idx = i;
1675 eqo->max_eqd = BE_MAX_EQD;
1676 eqo->enable_aic = true;
1677
1678 eq = &eqo->q;
1679 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1680 sizeof(struct be_eq_entry));
1681 if (rc)
1682 return rc;
1683
1684 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1685 if (rc)
1686 return rc;
1687 }
1cfafab9 1688 return 0;
10ef9ab4
SP
1689}
1690
5fb379ee
SP
1691static void be_mcc_queues_destroy(struct be_adapter *adapter)
1692{
1693 struct be_queue_info *q;
5fb379ee 1694
8788fdc2 1695 q = &adapter->mcc_obj.q;
5fb379ee 1696 if (q->created)
8788fdc2 1697 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1698 be_queue_free(adapter, q);
1699
8788fdc2 1700 q = &adapter->mcc_obj.cq;
5fb379ee 1701 if (q->created)
8788fdc2 1702 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1703 be_queue_free(adapter, q);
1704}
1705
1706/* Must be called only after the EQs are created, as the MCC CQ uses the default EQ */
1707static int be_mcc_queues_create(struct be_adapter *adapter)
1708{
1709 struct be_queue_info *q, *cq;
5fb379ee 1710
8788fdc2 1711 cq = &adapter->mcc_obj.cq;
5fb379ee 1712 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1713 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1714 goto err;
1715
10ef9ab4
SP
1716 /* Use the default EQ for MCC completions */
1717 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1718 goto mcc_cq_free;
1719
8788fdc2 1720 q = &adapter->mcc_obj.q;
5fb379ee
SP
1721 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1722 goto mcc_cq_destroy;
1723
8788fdc2 1724 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1725 goto mcc_q_free;
1726
1727 return 0;
1728
1729mcc_q_free:
1730 be_queue_free(adapter, q);
1731mcc_cq_destroy:
8788fdc2 1732 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1733mcc_cq_free:
1734 be_queue_free(adapter, cq);
1735err:
1736 return -1;
1737}
1738
6b7c5b94
SP
1739static void be_tx_queues_destroy(struct be_adapter *adapter)
1740{
1741 struct be_queue_info *q;
3c8def97
SP
1742 struct be_tx_obj *txo;
1743 u8 i;
6b7c5b94 1744
3c8def97
SP
1745 for_all_tx_queues(adapter, txo, i) {
1746 q = &txo->q;
1747 if (q->created)
1748 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1749 be_queue_free(adapter, q);
6b7c5b94 1750
3c8def97
SP
1751 q = &txo->cq;
1752 if (q->created)
1753 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1754 be_queue_free(adapter, q);
1755 }
6b7c5b94
SP
1756}
1757
dafc0fe3
SP
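/* Multiple TX queues are used only on a BE3 PF that is not in SR-IOV
 * or multi-channel mode; all other configs get a single TXQ.
 */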
1758static int be_num_txqs_want(struct be_adapter *adapter)
1759{
11ac75ed 1760 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
dafc0fe3
SP
1761 lancer_chip(adapter) || !be_physfn(adapter) ||
1762 adapter->generation == BE_GEN2)
1763 return 1;
1764 else
1765 return MAX_TX_QS;
1766}
1767
10ef9ab4 1768static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1769{
10ef9ab4
SP
1770 struct be_queue_info *cq, *eq;
1771 int status;
3c8def97
SP
1772 struct be_tx_obj *txo;
1773 u8 i;
6b7c5b94 1774
dafc0fe3 1775 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1776 if (adapter->num_tx_qs != MAX_TX_QS) {
1777 rtnl_lock();
dafc0fe3
SP
1778 netif_set_real_num_tx_queues(adapter->netdev,
1779 adapter->num_tx_qs);
3bb62f4f
PR
1780 rtnl_unlock();
1781 }
dafc0fe3 1782
10ef9ab4
SP
1783 for_all_tx_queues(adapter, txo, i) {
1784 cq = &txo->cq;
1785 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1786 sizeof(struct be_eth_tx_compl));
1787 if (status)
1788 return status;
3c8def97 1789
10ef9ab4
SP
1790 /* If num_evt_qs is less than num_tx_qs, then more than
1791 * one txq shares an eq
1792 */
1793 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1794 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1795 if (status)
1796 return status;
1797 }
1798 return 0;
1799}
6b7c5b94 1800
10ef9ab4
SP
1801static int be_tx_qs_create(struct be_adapter *adapter)
1802{
1803 struct be_tx_obj *txo;
1804 int i, status;
fe6d2a38 1805
3c8def97 1806 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
1807 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1808 sizeof(struct be_eth_wrb));
1809 if (status)
1810 return status;
6b7c5b94 1811
10ef9ab4
SP
1812 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1813 if (status)
1814 return status;
3c8def97 1815 }
6b7c5b94 1816
10ef9ab4 1817 return 0;
6b7c5b94
SP
1818}
1819
10ef9ab4 1820static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
1821{
1822 struct be_queue_info *q;
3abcdeda
SP
1823 struct be_rx_obj *rxo;
1824 int i;
1825
1826 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1827 q = &rxo->cq;
1828 if (q->created)
1829 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1830 be_queue_free(adapter, q);
ac6a0c4a
SP
1831 }
1832}
1833
10ef9ab4 1834static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1835{
10ef9ab4 1836 struct be_queue_info *eq, *cq;
3abcdeda
SP
1837 struct be_rx_obj *rxo;
1838 int rc, i;
6b7c5b94 1839
10ef9ab4
SP
1840 /* We'll create as many RSS rings as there are irqs.
1841 * But when there's only one irq there's no use creating RSS rings.
1842 */
1843 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1844 num_irqs(adapter) + 1 : 1;
ac6a0c4a 1845
6b7c5b94 1846 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
1847 for_all_rx_queues(adapter, rxo, i) {
1848 rxo->adapter = adapter;
3abcdeda
SP
1849 cq = &rxo->cq;
1850 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1851 sizeof(struct be_eth_rx_compl));
1852 if (rc)
10ef9ab4 1853 return rc;
3abcdeda 1854
10ef9ab4
SP
1855 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1856 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 1857 if (rc)
10ef9ab4 1858 return rc;
3abcdeda 1859 }
6b7c5b94 1860
10ef9ab4
SP
1861 if (adapter->num_rx_qs != MAX_RX_QS)
1862 dev_info(&adapter->pdev->dev,
1863 "Created only %d receive queues", adapter->num_rx_qs);
6b7c5b94 1864
10ef9ab4 1865 return 0;
b628bde2
SP
1866}
1867
6b7c5b94
SP
1868static irqreturn_t be_intx(int irq, void *dev)
1869{
1870 struct be_adapter *adapter = dev;
10ef9ab4 1871 int num_evts;
6b7c5b94 1872
10ef9ab4
SP
1873 /* With INTx only one EQ is used */
1874 num_evts = event_handle(&adapter->eq_obj[0]);
1875 if (num_evts)
1876 return IRQ_HANDLED;
1877 else
1878 return IRQ_NONE;
6b7c5b94
SP
1879}
1880
10ef9ab4 1881static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 1882{
10ef9ab4 1883 struct be_eq_obj *eqo = dev;
6b7c5b94 1884
10ef9ab4 1885 event_handle(eqo);
6b7c5b94
SP
1886 return IRQ_HANDLED;
1887}
1888
2e588f84 1889static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1890{
2e588f84 1891 return rxcp->tcpf && !rxcp->err;
6b7c5b94
SP
1892}
1893
10ef9ab4
SP
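/* NAPI RX handler: consumes up to 'budget' completions (skipping flush
 * compls and discarding bad ones) and replenishes the RXQ when the
 * number of posted frags runs low.
 */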
1894static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1895 int budget)
6b7c5b94 1896{
3abcdeda
SP
1897 struct be_adapter *adapter = rxo->adapter;
1898 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1899 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1900 u32 work_done;
1901
1902 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1903 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1904 if (!rxcp)
1905 break;
1906
12004ae9
SP
1907 /* Is it a flush compl that has no data */
1908 if (unlikely(rxcp->num_rcvd == 0))
1909 goto loop_continue;
1910
1911 /* Discard compl with partial DMA Lancer B0 */
1912 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 1913 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
1914 goto loop_continue;
1915 }
1916
1917 /* On BE drop pkts that arrive due to imperfect filtering in
1918 * promiscuous mode on some SKUs
1919 */
1920 if (unlikely(rxcp->port != adapter->port_num &&
1921 !lancer_chip(adapter))) {
10ef9ab4 1922 be_rx_compl_discard(rxo, rxcp);
12004ae9 1923 goto loop_continue;
64642811 1924 }
009dd872 1925
12004ae9 1926 if (do_gro(rxcp))
10ef9ab4 1927 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 1928 else
10ef9ab4 1929 be_rx_compl_process(rxo, rxcp);
12004ae9 1930loop_continue:
2e588f84 1931 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
1932 }
1933
10ef9ab4
SP
1934 if (work_done) {
1935 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 1936
10ef9ab4
SP
1937 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1938 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 1939 }
10ef9ab4 1940
6b7c5b94
SP
1941 return work_done;
1942}
1943
10ef9ab4
SP
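/* Reaps up to 'budget' TX completions for one TXQ; the corresponding
 * netdev subqueue is woken once at least half of the TXQ is free.
 * Returns true when all pending completions fit within the budget.
 */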
1944static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
1945 int budget, int idx)
6b7c5b94 1946{
6b7c5b94 1947 struct be_eth_tx_compl *txcp;
10ef9ab4 1948 int num_wrbs = 0, work_done;
3c8def97 1949
10ef9ab4
SP
1950 for (work_done = 0; work_done < budget; work_done++) {
1951 txcp = be_tx_compl_get(&txo->cq);
1952 if (!txcp)
1953 break;
1954 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
1955 AMAP_GET_BITS(struct amap_eth_tx_compl,
1956 wrb_index, txcp));
10ef9ab4 1957 }
6b7c5b94 1958
10ef9ab4
SP
1959 if (work_done) {
1960 be_cq_notify(adapter, txo->cq.id, true, work_done);
1961 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 1962
10ef9ab4
SP
1963 /* As Tx wrbs have been freed up, wake up netdev queue
1964 * if it was stopped due to lack of tx wrbs. */
1965 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
1966 atomic_read(&txo->q.used) < txo->q.len / 2) {
1967 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 1968 }
10ef9ab4
SP
1969
1970 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1971 tx_stats(txo)->tx_compl += work_done;
1972 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 1973 }
10ef9ab4
SP
1974 return (work_done < budget); /* Done */
1975}
6b7c5b94 1976
10ef9ab4
SP
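/* NAPI poll: services every TXQ and RXQ hashed to this EQ (and the MCC
 * queue, on the EQ that owns it); the EQ is re-armed only when less
 * than the full budget was consumed.
 */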
1977int be_poll(struct napi_struct *napi, int budget)
1978{
1979 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
1980 struct be_adapter *adapter = eqo->adapter;
1981 int max_work = 0, work, i;
1982 bool tx_done;
f31e50a8 1983
10ef9ab4
SP
1984 /* Process all TXQs serviced by this EQ */
1985 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
1986 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
1987 eqo->tx_budget, i);
1988 if (!tx_done)
1989 max_work = budget;
f31e50a8
SP
1990 }
1991
10ef9ab4
SP
1992 /* This loop will iterate twice for EQ0 in which
1993 * completions of the last RXQ (default one) are also processed.
1994 * For other EQs the loop iterates only once.
1995 */
1996 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
1997 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
1998 max_work = max(work, max_work);
1999 }
6b7c5b94 2000
10ef9ab4
SP
2001 if (is_mcc_eqo(eqo))
2002 be_process_mcc(adapter);
93c86700 2003
10ef9ab4
SP
2004 if (max_work < budget) {
2005 napi_complete(napi);
2006 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2007 } else {
2008 /* As we'll continue in polling mode, count and clear events */
2009 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
93c86700 2010 }
10ef9ab4 2011 return max_work;
6b7c5b94
SP
2012}
2013
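/* Detects an unrecoverable error by reading the SLIPORT status regs on
 * Lancer, or the unmasked UE status regs on BE, and logs any error
 * bits that are set.
 */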
d053de91 2014void be_detect_dump_ue(struct be_adapter *adapter)
7c185276 2015{
e1cfb67a
PR
2016 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2017 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2018 u32 i;
2019
72f02485
SP
2020 if (adapter->eeh_err || adapter->ue_detected)
2021 return;
2022
e1cfb67a
PR
2023 if (lancer_chip(adapter)) {
2024 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2025 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2026 sliport_err1 = ioread32(adapter->db +
2027 SLIPORT_ERROR1_OFFSET);
2028 sliport_err2 = ioread32(adapter->db +
2029 SLIPORT_ERROR2_OFFSET);
2030 }
2031 } else {
2032 pci_read_config_dword(adapter->pdev,
2033 PCICFG_UE_STATUS_LOW, &ue_lo);
2034 pci_read_config_dword(adapter->pdev,
2035 PCICFG_UE_STATUS_HIGH, &ue_hi);
2036 pci_read_config_dword(adapter->pdev,
2037 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2038 pci_read_config_dword(adapter->pdev,
2039 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2040
2041 ue_lo = (ue_lo & (~ue_lo_mask));
2042 ue_hi = (ue_hi & (~ue_hi_mask));
2043 }
7c185276 2044
e1cfb67a
PR
2045 if (ue_lo || ue_hi ||
2046 sliport_status & SLIPORT_STATUS_ERR_MASK) {
d053de91 2047 adapter->ue_detected = true;
7acc2087 2048 adapter->eeh_err = true;
434b3648
SP
2049 dev_err(&adapter->pdev->dev,
2050 "Unrecoverable error in the card\n");
d053de91
AK
2051 }
2052
e1cfb67a
PR
2053 if (ue_lo) {
2054 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2055 if (ue_lo & 1)
7c185276
AK
2056 dev_err(&adapter->pdev->dev,
2057 "UE: %s bit set\n", ue_status_low_desc[i]);
2058 }
2059 }
e1cfb67a
PR
2060 if (ue_hi) {
2061 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2062 if (ue_hi & 1)
7c185276
AK
2063 dev_err(&adapter->pdev->dev,
2064 "UE: %s bit set\n", ue_status_hi_desc[i]);
2065 }
2066 }
2067
e1cfb67a
PR
2068 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2069 dev_err(&adapter->pdev->dev,
2070 "sliport status 0x%x\n", sliport_status);
2071 dev_err(&adapter->pdev->dev,
2072 "sliport error1 0x%x\n", sliport_err1);
2073 dev_err(&adapter->pdev->dev,
2074 "sliport error2 0x%x\n", sliport_err2);
2075 }
7c185276
AK
2076}
2077
8d56ff11
SP
2078static void be_msix_disable(struct be_adapter *adapter)
2079{
ac6a0c4a 2080 if (msix_enabled(adapter)) {
8d56ff11 2081 pci_disable_msix(adapter->pdev);
ac6a0c4a 2082 adapter->num_msix_vec = 0;
3abcdeda
SP
2083 }
2084}
2085
10ef9ab4
SP
2086static uint be_num_rss_want(struct be_adapter *adapter)
2087{
2088 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2089 adapter->num_vfs == 0 && be_physfn(adapter) &&
2090 !be_is_mc(adapter))
2091 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2092 else
2093 return 0;
2094}
2095
6b7c5b94
SP
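/* Requests one MSI-x vector per desired RSS ring (capped by the online
 * CPU count, minimum one) and settles for the smaller count the PCI
 * core may grant instead.
 */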
2096static void be_msix_enable(struct be_adapter *adapter)
2097{
10ef9ab4 2098#define BE_MIN_MSIX_VECTORS 1
ac6a0c4a 2099 int i, status, num_vec;
6b7c5b94 2100
10ef9ab4
SP
2101 /* If RSS queues are not used, need a vec for default RX Q */
2102 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2103 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2104
ac6a0c4a 2105 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2106 adapter->msix_entries[i].entry = i;
2107
ac6a0c4a 2108 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2109 if (status == 0) {
2110 goto done;
2111 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2112 num_vec = status;
3abcdeda 2113 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2114 num_vec) == 0)
3abcdeda 2115 goto done;
3abcdeda
SP
2116 }
2117 return;
2118done:
ac6a0c4a
SP
2119 adapter->num_msix_vec = num_vec;
2120 return;
6b7c5b94
SP
2121}
2122
f9449ab7 2123static int be_sriov_enable(struct be_adapter *adapter)
ba343c77 2124{
344dbf10 2125 be_check_sriov_fn_type(adapter);
11ac75ed 2126
6dedec81 2127#ifdef CONFIG_PCI_IOV
ba343c77 2128 if (be_physfn(adapter) && num_vfs) {
81be8f0a 2129 int status, pos;
11ac75ed 2130 u16 dev_vfs;
81be8f0a
AK
2131
2132 pos = pci_find_ext_capability(adapter->pdev,
2133 PCI_EXT_CAP_ID_SRIOV);
2134 pci_read_config_word(adapter->pdev,
11ac75ed 2135 pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
81be8f0a 2136
11ac75ed
SP
2137 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2138 if (adapter->num_vfs != num_vfs)
81be8f0a 2139 dev_info(&adapter->pdev->dev,
11ac75ed
SP
2140 "Device supports %d VFs and not %d\n",
2141 adapter->num_vfs, num_vfs);
6dedec81 2142
11ac75ed
SP
2143 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2144 if (status)
2145 adapter->num_vfs = 0;
f9449ab7 2146
11ac75ed 2147 if (adapter->num_vfs) {
f9449ab7
SP
2148 adapter->vf_cfg = kcalloc(num_vfs,
2149 sizeof(struct be_vf_cfg),
2150 GFP_KERNEL);
2151 if (!adapter->vf_cfg)
2152 return -ENOMEM;
2153 }
ba343c77
SB
2154 }
2155#endif
f9449ab7 2156 return 0;
ba343c77
SB
2157}
2158
2159static void be_sriov_disable(struct be_adapter *adapter)
2160{
2161#ifdef CONFIG_PCI_IOV
11ac75ed 2162 if (sriov_enabled(adapter)) {
ba343c77 2163 pci_disable_sriov(adapter->pdev);
f9449ab7 2164 kfree(adapter->vf_cfg);
11ac75ed 2165 adapter->num_vfs = 0;
ba343c77
SB
2166 }
2167#endif
2168}
2169
fe6d2a38 2170static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2171 struct be_eq_obj *eqo)
b628bde2 2172{
10ef9ab4 2173 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2174}
6b7c5b94 2175
b628bde2
SP
2176static int be_msix_register(struct be_adapter *adapter)
2177{
10ef9ab4
SP
2178 struct net_device *netdev = adapter->netdev;
2179 struct be_eq_obj *eqo;
2180 int status, i, vec;
6b7c5b94 2181
10ef9ab4
SP
2182 for_all_evt_queues(adapter, eqo, i) {
2183 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2184 vec = be_msix_vec_get(adapter, eqo);
2185 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2186 if (status)
2187 goto err_msix;
2188 }
b628bde2 2189
6b7c5b94 2190 return 0;
3abcdeda 2191err_msix:
10ef9ab4
SP
2192 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2193 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2194 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2195 status);
ac6a0c4a 2196 be_msix_disable(adapter);
6b7c5b94
SP
2197 return status;
2198}
2199
2200static int be_irq_register(struct be_adapter *adapter)
2201{
2202 struct net_device *netdev = adapter->netdev;
2203 int status;
2204
ac6a0c4a 2205 if (msix_enabled(adapter)) {
6b7c5b94
SP
2206 status = be_msix_register(adapter);
2207 if (status == 0)
2208 goto done;
ba343c77
SB
2209 /* INTx is not supported for VF */
2210 if (!be_physfn(adapter))
2211 return status;
6b7c5b94
SP
2212 }
2213
2214 /* INTx */
2215 netdev->irq = adapter->pdev->irq;
2216 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2217 adapter);
2218 if (status) {
2219 dev_err(&adapter->pdev->dev,
2220 "INTx request IRQ failed - err %d\n", status);
2221 return status;
2222 }
2223done:
2224 adapter->isr_registered = true;
2225 return 0;
2226}
2227
2228static void be_irq_unregister(struct be_adapter *adapter)
2229{
2230 struct net_device *netdev = adapter->netdev;
10ef9ab4 2231 struct be_eq_obj *eqo;
3abcdeda 2232 int i;
6b7c5b94
SP
2233
2234 if (!adapter->isr_registered)
2235 return;
2236
2237 /* INTx */
ac6a0c4a 2238 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2239 free_irq(netdev->irq, adapter);
2240 goto done;
2241 }
2242
2243 /* MSIx */
10ef9ab4
SP
2244 for_all_evt_queues(adapter, eqo, i)
2245 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2246
6b7c5b94
SP
2247done:
2248 adapter->isr_registered = false;
6b7c5b94
SP
2249}
2250
10ef9ab4 2251static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2252{
2253 struct be_queue_info *q;
2254 struct be_rx_obj *rxo;
2255 int i;
2256
2257 for_all_rx_queues(adapter, rxo, i) {
2258 q = &rxo->q;
2259 if (q->created) {
2260 be_cmd_rxq_destroy(adapter, q);
2261 /* After the rxq is invalidated, wait for a grace time
2262 * of 1ms for all dma to end and the flush compl to
2263 * arrive
2264 */
2265 mdelay(1);
10ef9ab4 2266 be_rx_cq_clean(rxo);
482c9e79 2267 }
10ef9ab4 2268 be_queue_free(adapter, q);
482c9e79
SP
2269 }
2270}
2271
889cd4b2
SP
2272static int be_close(struct net_device *netdev)
2273{
2274 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2275 struct be_eq_obj *eqo;
2276 int i;
889cd4b2 2277
889cd4b2
SP
2278 be_async_mcc_disable(adapter);
2279
fe6d2a38
SP
2280 if (!lancer_chip(adapter))
2281 be_intr_set(adapter, false);
889cd4b2 2282
10ef9ab4
SP
2283 for_all_evt_queues(adapter, eqo, i) {
2284 napi_disable(&eqo->napi);
2285 if (msix_enabled(adapter))
2286 synchronize_irq(be_msix_vec_get(adapter, eqo));
2287 else
2288 synchronize_irq(netdev->irq);
2289 be_eq_clean(eqo);
63fcb27f
PR
2290 }
2291
889cd4b2
SP
2292 be_irq_unregister(adapter);
2293
889cd4b2
SP
2294 /* Wait for all pending tx completions to arrive so that
2295 * all tx skbs are freed.
2296 */
0ae57bb3 2297 be_tx_compl_clean(adapter);
889cd4b2 2298
10ef9ab4 2299 be_rx_qs_destroy(adapter);
482c9e79
SP
2300 return 0;
2301}
2302
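/* Creates the RXQs: the default (non-RSS) queue first, then the RSS
 * rings, and programs the 128-entry RSS indirection table when more
 * than one RXQ exists.
 */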
10ef9ab4 2303static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2304{
2305 struct be_rx_obj *rxo;
e9008ee9
PR
2306 int rc, i, j;
2307 u8 rsstable[128];
482c9e79
SP
2308
2309 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2310 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2311 sizeof(struct be_eth_rx_d));
2312 if (rc)
2313 return rc;
2314 }
2315
2316 /* The FW would like the default RXQ to be created first */
2317 rxo = default_rxo(adapter);
2318 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2319 adapter->if_handle, false, &rxo->rss_id);
2320 if (rc)
2321 return rc;
2322
2323 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2324 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2325 rx_frag_size, adapter->if_handle,
2326 true, &rxo->rss_id);
482c9e79
SP
2327 if (rc)
2328 return rc;
2329 }
2330
2331 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2332 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2333 for_all_rss_queues(adapter, rxo, i) {
2334 if ((j + i) >= 128)
2335 break;
2336 rsstable[j + i] = rxo->rss_id;
2337 }
2338 }
2339 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2340 if (rc)
2341 return rc;
2342 }
2343
2344 /* First time posting */
10ef9ab4 2345 for_all_rx_queues(adapter, rxo, i)
482c9e79 2346 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2347 return 0;
2348}
2349
6b7c5b94
SP
2350static int be_open(struct net_device *netdev)
2351{
2352 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2353 struct be_eq_obj *eqo;
3abcdeda 2354 struct be_rx_obj *rxo;
10ef9ab4 2355 struct be_tx_obj *txo;
b236916a 2356 u8 link_status;
3abcdeda 2357 int status, i;
5fb379ee 2358
10ef9ab4 2359 status = be_rx_qs_create(adapter);
482c9e79
SP
2360 if (status)
2361 goto err;
2362
5fb379ee
SP
2363 be_irq_register(adapter);
2364
fe6d2a38
SP
2365 if (!lancer_chip(adapter))
2366 be_intr_set(adapter, true);
5fb379ee 2367
10ef9ab4 2368 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2369 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2370
10ef9ab4
SP
2371 for_all_tx_queues(adapter, txo, i)
2372 be_cq_notify(adapter, txo->cq.id, true, 0);
2373
7a1e9b20
SP
2374 be_async_mcc_enable(adapter);
2375
10ef9ab4
SP
2376 for_all_evt_queues(adapter, eqo, i) {
2377 napi_enable(&eqo->napi);
2378 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2379 }
2380
b236916a
AK
2381 status = be_cmd_link_status_query(adapter, NULL, NULL,
2382 &link_status, 0);
2383 if (!status)
2384 be_link_status_update(adapter, link_status);
2385
889cd4b2
SP
2386 return 0;
2387err:
2388 be_close(adapter->netdev);
2389 return -EIO;
5fb379ee
SP
2390}
2391
71d8d1b5
AK
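/* Enables or disables magic-packet WOL: the FW is programmed with the
 * netdev MAC (or a zeroed MAC to disable) and the PCI D3hot/D3cold
 * wake states are set to match.
 */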
2392static int be_setup_wol(struct be_adapter *adapter, bool enable)
2393{
2394 struct be_dma_mem cmd;
2395 int status = 0;
2396 u8 mac[ETH_ALEN];
2397
2398 memset(mac, 0, ETH_ALEN);
2399
2400 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2401 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2402 GFP_KERNEL);
71d8d1b5
AK
2403 if (cmd.va == NULL)
2404 return -1;
2405 memset(cmd.va, 0, cmd.size);
2406
2407 if (enable) {
2408 status = pci_write_config_dword(adapter->pdev,
2409 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2410 if (status) {
2411 dev_err(&adapter->pdev->dev,
2381a55c 2412 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2413 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2414 cmd.dma);
71d8d1b5
AK
2415 return status;
2416 }
2417 status = be_cmd_enable_magic_wol(adapter,
2418 adapter->netdev->dev_addr, &cmd);
2419 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2420 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2421 } else {
2422 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2423 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2424 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2425 }
2426
2b7bcebf 2427 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2428 return status;
2429}
2430
6d87f5c3
AK
2431/*
2432 * Generate a seed MAC address from the PF MAC Address using jhash.
2433 * MAC addresses for VFs are assigned incrementally starting from the seed.
2434 * These addresses are programmed in the ASIC by the PF and the VF driver
2435 * queries for the MAC address during its probe.
2436 */
2437static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2438{
f9449ab7 2439 u32 vf;
3abcdeda 2440 int status = 0;
6d87f5c3 2441 u8 mac[ETH_ALEN];
11ac75ed 2442 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2443
2444 be_vf_eth_addr_generate(adapter, mac);
2445
11ac75ed 2446 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2447 if (lancer_chip(adapter)) {
2448 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2449 } else {
2450 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2451 vf_cfg->if_handle,
2452 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2453 }
2454
6d87f5c3
AK
2455 if (status)
2456 dev_err(&adapter->pdev->dev,
590c391d 2457 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2458 else
11ac75ed 2459 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2460
2461 mac[5] += 1;
2462 }
2463 return status;
2464}
2465
f9449ab7 2466static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2467{
11ac75ed 2468 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2469 u32 vf;
2470
11ac75ed 2471 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2472 if (lancer_chip(adapter))
2473 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2474 else
11ac75ed
SP
2475 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2476 vf_cfg->pmac_id, vf + 1);
f9449ab7 2477
11ac75ed
SP
2478 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2479 }
6d87f5c3
AK
2480}
2481
a54769f5
SP
2482static int be_clear(struct be_adapter *adapter)
2483{
fbc13f01
AK
2484 int i = 1;
2485
191eb756
SP
2486 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2487 cancel_delayed_work_sync(&adapter->work);
2488 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2489 }
2490
11ac75ed 2491 if (sriov_enabled(adapter))
f9449ab7
SP
2492 be_vf_clear(adapter);
2493
fbc13f01
AK
2494 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2495 be_cmd_pmac_del(adapter, adapter->if_handle,
2496 adapter->pmac_id[i], 0);
2497
f9449ab7 2498 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2499
2500 be_mcc_queues_destroy(adapter);
10ef9ab4 2501 be_rx_cqs_destroy(adapter);
a54769f5 2502 be_tx_queues_destroy(adapter);
10ef9ab4 2503 be_evt_queues_destroy(adapter);
a54769f5
SP
2504
2505 /* tell fw we're done with firing cmds */
2506 be_cmd_fw_clean(adapter);
10ef9ab4
SP
2507
2508 be_msix_disable(adapter);
fbc13f01 2509 kfree(adapter->pmac_id);
a54769f5
SP
2510 return 0;
2511}
2512
30128031
SP
2513static void be_vf_setup_init(struct be_adapter *adapter)
2514{
11ac75ed 2515 struct be_vf_cfg *vf_cfg;
30128031
SP
2516 int vf;
2517
11ac75ed
SP
2518 for_all_vfs(adapter, vf_cfg, vf) {
2519 vf_cfg->if_handle = -1;
2520 vf_cfg->pmac_id = -1;
30128031
SP
2521 }
2522}
2523
f9449ab7
SP
2524static int be_vf_setup(struct be_adapter *adapter)
2525{
11ac75ed 2526 struct be_vf_cfg *vf_cfg;
f9449ab7
SP
2527 u32 cap_flags, en_flags, vf;
2528 u16 lnk_speed;
2529 int status;
2530
30128031
SP
2531 be_vf_setup_init(adapter);
2532
590c391d
PR
2533 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2534 BE_IF_FLAGS_MULTICAST;
11ac75ed 2535 for_all_vfs(adapter, vf_cfg, vf) {
f9449ab7 2536 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
11ac75ed 2537 &vf_cfg->if_handle, NULL, vf + 1);
f9449ab7
SP
2538 if (status)
2539 goto err;
f9449ab7
SP
2540 }
2541
590c391d
PR
2542 status = be_vf_eth_addr_config(adapter);
2543 if (status)
2544 goto err;
f9449ab7 2545
11ac75ed 2546 for_all_vfs(adapter, vf_cfg, vf) {
f9449ab7 2547 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
b236916a 2548 NULL, vf + 1);
f9449ab7
SP
2549 if (status)
2550 goto err;
11ac75ed 2551 vf_cfg->tx_rate = lnk_speed * 10;
f9449ab7
SP
2552 }
2553 return 0;
2554err:
2555 return status;
2556}
2557
30128031
SP
2558static void be_setup_init(struct be_adapter *adapter)
2559{
2560 adapter->vlan_prio_bmap = 0xff;
2561 adapter->link_speed = -1;
2562 adapter->if_handle = -1;
2563 adapter->be3_native = false;
2564 adapter->promiscuous = false;
2565 adapter->eq_next_idx = 0;
2566}
2567
e5e1ee89 2568static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
590c391d
PR
2569{
2570 u32 pmac_id;
e5e1ee89
PR
2571 int status;
2572 bool pmac_id_active;
2573
2574 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2575 &pmac_id, mac);
590c391d
PR
2576 if (status != 0)
2577 goto do_none;
e5e1ee89
PR
2578
2579 if (pmac_id_active) {
2580 status = be_cmd_mac_addr_query(adapter, mac,
2581 MAC_ADDRESS_TYPE_NETWORK,
2582 false, adapter->if_handle, pmac_id);
2583
2584 if (!status)
fbc13f01 2585 adapter->pmac_id[0] = pmac_id;
e5e1ee89
PR
2586 } else {
2587 status = be_cmd_pmac_add(adapter, mac,
fbc13f01 2588 adapter->if_handle, &adapter->pmac_id[0], 0);
e5e1ee89 2589 }
590c391d
PR
2590do_none:
2591 return status;
2592}
2593
5fb379ee
SP
2594static int be_setup(struct be_adapter *adapter)
2595{
5fb379ee 2596 struct net_device *netdev = adapter->netdev;
f9449ab7 2597 u32 cap_flags, en_flags;
a54769f5 2598 u32 tx_fc, rx_fc;
10ef9ab4 2599 int status;
ba343c77
SB
2600 u8 mac[ETH_ALEN];
2601
30128031 2602 be_setup_init(adapter);
6b7c5b94 2603
f9449ab7 2604 be_cmd_req_native_mode(adapter);
73d540f2 2605
10ef9ab4
SP
2606 be_msix_enable(adapter);
2607
2608 status = be_evt_queues_create(adapter);
2609 if (status)
a54769f5 2610 goto err;
6b7c5b94 2611
10ef9ab4
SP
2612 status = be_tx_cqs_create(adapter);
2613 if (status)
2614 goto err;
2615
2616 status = be_rx_cqs_create(adapter);
2617 if (status)
a54769f5 2618 goto err;
6b7c5b94 2619
f9449ab7 2620 status = be_mcc_queues_create(adapter);
10ef9ab4 2621 if (status)
a54769f5 2622 goto err;
6b7c5b94 2623
f9449ab7
SP
2624 memset(mac, 0, ETH_ALEN);
2625 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
590c391d 2626 true /*permanent */, 0, 0);
f9449ab7
SP
2627 if (status)
2628 return status;
2629 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2630 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2903dd65 2631
f9449ab7
SP
2632 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2633 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2634 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
5d5adb93
PR
2635 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2636
f9449ab7
SP
2637 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2638 cap_flags |= BE_IF_FLAGS_RSS;
2639 en_flags |= BE_IF_FLAGS_RSS;
2640 }
2641 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2642 netdev->dev_addr, &adapter->if_handle,
fbc13f01 2643 &adapter->pmac_id[0], 0);
5fb379ee 2644 if (status != 0)
a54769f5 2645 goto err;
6b7c5b94 2646
590c391d
PR
2647 /* The VF's permanent mac queried from card is incorrect.
2648 * For BEx: Query the mac configured by the PF using if_handle.
2649 * For Lancer: Get and use mac_list to obtain mac address.
2650 */
2651 if (!be_physfn(adapter)) {
2652 if (lancer_chip(adapter))
e5e1ee89 2653 status = be_add_mac_from_list(adapter, mac);
590c391d
PR
2654 else
2655 status = be_cmd_mac_addr_query(adapter, mac,
2656 MAC_ADDRESS_TYPE_NETWORK, false,
2657 adapter->if_handle, 0);
f9449ab7
SP
2658 if (!status) {
2659 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2660 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2661 }
2662 }
0dffc83e 2663
10ef9ab4
SP
2664 status = be_tx_qs_create(adapter);
2665 if (status)
2666 goto err;
2667
04b71175 2668 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2669
a54769f5
SP
2670 status = be_vid_config(adapter, false, 0);
2671 if (status)
2672 goto err;
7ab8b0b4 2673
a54769f5 2674 be_set_rx_mode(adapter->netdev);
5fb379ee 2675
a54769f5 2676 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d
PR
2677 /* For Lancer: It is legal for this cmd to fail on VF */
2678 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5 2679 goto err;
590c391d 2680
a54769f5
SP
2681 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2682 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2683 adapter->rx_fc);
590c391d
PR
2684 /* For Lancer: It is legal for this cmd to fail on VF */
2685 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
a54769f5
SP
2686 goto err;
2687 }
2dc1deb6 2688
a54769f5 2689 pcie_set_readrq(adapter->pdev, 4096);
5fb379ee 2690
11ac75ed 2691 if (sriov_enabled(adapter)) {
f9449ab7
SP
2692 status = be_vf_setup(adapter);
2693 if (status)
2694 goto err;
2695 }
2696
191eb756
SP
2697 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2698 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2699
f9449ab7 2700 return 0;
a54769f5
SP
2701err:
2702 be_clear(adapter);
2703 return status;
2704}
6b7c5b94 2705
66268739
IV
2706#ifdef CONFIG_NET_POLL_CONTROLLER
2707static void be_netpoll(struct net_device *netdev)
2708{
2709 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2710 struct be_eq_obj *eqo;
66268739
IV
2711 int i;
2712
10ef9ab4
SP
2713 for_all_evt_queues(adapter, eqo, i)
2714 event_handle(eqo);
2715
2716 return;
66268739
IV
2717}
2718#endif
2719
84517482 2720#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2721static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2722 const u8 *p, u32 img_start, int image_size,
2723 int hdr_size)
fa9a6fed
SB
2724{
2725 u32 crc_offset;
2726 u8 flashed_crc[4];
2727 int status;
3f0d4560
AK
2728
2729 crc_offset = hdr_size + img_start + image_size - 4;
2730
fa9a6fed 2731 p += crc_offset;
3f0d4560
AK
2732
2733 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2734 (image_size - 4));
fa9a6fed
SB
2735 if (status) {
2736 dev_err(&adapter->pdev->dev,
2737 "could not get crc from flash, not flashing redboot\n");
2738 return false;
2739 }
2740
2741 /* update redboot only if crc does not match */
2742 if (!memcmp(flashed_crc, p, 4))
2743 return false;
2744 else
2745 return true;
fa9a6fed
SB
2746}
2747
306f1348
SP
2748static bool phy_flashing_required(struct be_adapter *adapter)
2749{
2750 int status = 0;
2751 struct be_phy_info phy_info;
2752
2753 status = be_cmd_get_phy_info(adapter, &phy_info);
2754 if (status)
2755 return false;
2756 if ((phy_info.phy_type == TN_8022) &&
2757 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2758 return true;
2759 }
2760 return false;
2761}
2762
3f0d4560 2763static int be_flash_data(struct be_adapter *adapter,
84517482 2764 const struct firmware *fw,
3f0d4560
AK
2765 struct be_dma_mem *flash_cmd, int num_of_images)
2766
84517482 2767{
3f0d4560
AK
2768 int status = 0, i, filehdr_size = 0;
2769 u32 total_bytes = 0, flash_op;
84517482
AK
2770 int num_bytes;
2771 const u8 *p = fw->data;
2772 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2773 const struct flash_comp *pflashcomp;
9fe96934 2774 int num_comp;
3f0d4560 2775
306f1348 2776 static const struct flash_comp gen3_flash_types[10] = {
3f0d4560
AK
2777 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2778 FLASH_IMAGE_MAX_SIZE_g3},
2779 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2780 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2781 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2782 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2783 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2784 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2785 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2786 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2787 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2788 FLASH_IMAGE_MAX_SIZE_g3},
2789 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2790 FLASH_IMAGE_MAX_SIZE_g3},
2791 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2792 FLASH_IMAGE_MAX_SIZE_g3},
2793 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
306f1348
SP
2794 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2795 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2796 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2797 };
215faf9c 2798 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2799 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2800 FLASH_IMAGE_MAX_SIZE_g2},
2801 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2802 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2803 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2804 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2805 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2806 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2807 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2808 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2809 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2810 FLASH_IMAGE_MAX_SIZE_g2},
2811 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2812 FLASH_IMAGE_MAX_SIZE_g2},
2813 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2814 FLASH_IMAGE_MAX_SIZE_g2}
2815 };
2816
2817 if (adapter->generation == BE_GEN3) {
2818 pflashcomp = gen3_flash_types;
2819 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2820 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2821 } else {
2822 pflashcomp = gen2_flash_types;
2823 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2824 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2825 }
9fe96934
SB
2826 for (i = 0; i < num_comp; i++) {
2827 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2828 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2829 continue;
306f1348
SP
2830 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2831 if (!phy_flashing_required(adapter))
2832 continue;
2833 }
3f0d4560
AK
2834 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2835 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2836 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2837 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2838 continue;
2839 p = fw->data;
2840 p += filehdr_size + pflashcomp[i].offset
2841 + (num_of_images * sizeof(struct image_hdr));
306f1348
SP
2842 if (p + pflashcomp[i].size > fw->data + fw->size)
2843 return -1;
2844 total_bytes = pflashcomp[i].size;
3f0d4560
AK
2845 while (total_bytes) {
2846 if (total_bytes > 32*1024)
2847 num_bytes = 32*1024;
2848 else
2849 num_bytes = total_bytes;
2850 total_bytes -= num_bytes;
306f1348
SP
2851 if (!total_bytes) {
2852 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2853 flash_op = FLASHROM_OPER_PHY_FLASH;
2854 else
2855 flash_op = FLASHROM_OPER_FLASH;
2856 } else {
2857 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2858 flash_op = FLASHROM_OPER_PHY_SAVE;
2859 else
2860 flash_op = FLASHROM_OPER_SAVE;
2861 }
3f0d4560
AK
2862 memcpy(req->params.data_buf, p, num_bytes);
2863 p += num_bytes;
2864 status = be_cmd_write_flashrom(adapter, flash_cmd,
2865 pflashcomp[i].optype, flash_op, num_bytes);
2866 if (status) {
306f1348
SP
2867 if ((status == ILLEGAL_IOCTL_REQ) &&
2868 (pflashcomp[i].optype ==
2869 IMG_TYPE_PHY_FW))
2870 break;
3f0d4560
AK
2871 dev_err(&adapter->pdev->dev,
2872 "cmd to write to flash rom failed.\n");
2873 return -1;
2874 }
84517482 2875 }
84517482 2876 }
84517482
AK
2877 return 0;
2878}
2879
3f0d4560
AK
2880static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2881{
2882 if (fhdr == NULL)
2883 return 0;
2884 if (fhdr->build[0] == '3')
2885 return BE_GEN3;
2886 else if (fhdr->build[0] == '2')
2887 return BE_GEN2;
2888 else
2889 return 0;
2890}
2891
485bf569
SN
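/* Streams the FW image to Lancer in 32KB chunks via write_object cmds
 * and then issues a zero-length write to commit the flashed image.
 */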
2892static int lancer_fw_download(struct be_adapter *adapter,
2893 const struct firmware *fw)
84517482 2894{
485bf569
SN
2895#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2896#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 2897 struct be_dma_mem flash_cmd;
485bf569
SN
2898 const u8 *data_ptr = NULL;
2899 u8 *dest_image_ptr = NULL;
2900 size_t image_size = 0;
2901 u32 chunk_size = 0;
2902 u32 data_written = 0;
2903 u32 offset = 0;
2904 int status = 0;
2905 u8 add_status = 0;
84517482 2906
485bf569 2907 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 2908 dev_err(&adapter->pdev->dev,
485bf569
SN
2909 "FW Image not properly aligned. "
2910 "Length must be 4 byte aligned.\n");
2911 status = -EINVAL;
2912 goto lancer_fw_exit;
d9efd2af
SB
2913 }
2914
485bf569
SN
2915 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2916 + LANCER_FW_DOWNLOAD_CHUNK;
2917 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2918 &flash_cmd.dma, GFP_KERNEL);
2919 if (!flash_cmd.va) {
2920 status = -ENOMEM;
2921 dev_err(&adapter->pdev->dev,
2922 "Memory allocation failure while flashing\n");
2923 goto lancer_fw_exit;
2924 }
84517482 2925
485bf569
SN
2926 dest_image_ptr = flash_cmd.va +
2927 sizeof(struct lancer_cmd_req_write_object);
2928 image_size = fw->size;
2929 data_ptr = fw->data;
2930
2931 while (image_size) {
2932 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2933
2934 /* Copy the image chunk content. */
2935 memcpy(dest_image_ptr, data_ptr, chunk_size);
2936
2937 status = lancer_cmd_write_object(adapter, &flash_cmd,
2938 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2939 &data_written, &add_status);
2940
2941 if (status)
2942 break;
2943
2944 offset += data_written;
2945 data_ptr += data_written;
2946 image_size -= data_written;
2947 }
2948
2949 if (!status) {
2950 /* Commit the FW written */
2951 status = lancer_cmd_write_object(adapter, &flash_cmd,
2952 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2953 &data_written, &add_status);
2954 }
2955
2956 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2957 flash_cmd.dma);
2958 if (status) {
2959 dev_err(&adapter->pdev->dev,
2960 "Firmware load error. "
2961 "Status code: 0x%x Additional Status: 0x%x\n",
2962 status, add_status);
2963 goto lancer_fw_exit;
2964 }
2965
2966 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2967lancer_fw_exit:
2968 return status;
2969}
2970
2971static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2972{
2973 struct flash_file_hdr_g2 *fhdr;
2974 struct flash_file_hdr_g3 *fhdr3;
2975 struct image_hdr *img_hdr_ptr = NULL;
2976 struct be_dma_mem flash_cmd;
2977 const u8 *p;
2978 int status = 0, i = 0, num_imgs = 0;
84517482
AK
2979
2980 p = fw->data;
3f0d4560 2981 fhdr = (struct flash_file_hdr_g2 *) p;
84517482 2982
84517482 2983 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2984 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2985 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2986 if (!flash_cmd.va) {
2987 status = -ENOMEM;
2988 dev_err(&adapter->pdev->dev,
2989 "Memory allocation failure while flashing\n");
485bf569 2990 goto be_fw_exit;
84517482
AK
2991 }
2992
3f0d4560
AK
2993 if ((adapter->generation == BE_GEN3) &&
2994 (get_ufigen_type(fhdr) == BE_GEN3)) {
2995 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2996 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2997 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2998 img_hdr_ptr = (struct image_hdr *) (fw->data +
2999 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
3000 i * sizeof(struct image_hdr)));
3001 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3002 status = be_flash_data(adapter, fw, &flash_cmd,
3003 num_imgs);
3f0d4560
AK
3004 }
3005 } else if ((adapter->generation == BE_GEN2) &&
3006 (get_ufigen_type(fhdr) == BE_GEN2)) {
3007 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3008 } else {
3009 dev_err(&adapter->pdev->dev,
3010 "UFI and Interface are not compatible for flashing\n");
3011 status = -1;
84517482
AK
3012 }
3013
2b7bcebf
IV
3014 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3015 flash_cmd.dma);
84517482
AK
3016 if (status) {
3017 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3018 goto be_fw_exit;
84517482
AK
3019 }
3020
af901ca1 3021 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3022
485bf569
SN
3023be_fw_exit:
3024 return status;
3025}
3026
3027int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3028{
3029 const struct firmware *fw;
3030 int status;
3031
3032 if (!netif_running(adapter->netdev)) {
3033 dev_err(&adapter->pdev->dev,
3034 "Firmware load not allowed (interface is down)\n");
3035 return -1;
3036 }
3037
3038 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3039 if (status)
3040 goto fw_exit;
3041
3042 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3043
3044 if (lancer_chip(adapter))
3045 status = lancer_fw_download(adapter, fw);
3046 else
3047 status = be_fw_download(adapter, fw);
3048
84517482
AK
3049fw_exit:
3050 release_firmware(fw);
3051 return status;
3052}
3053
e5686ad8 3054static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3055 .ndo_open = be_open,
3056 .ndo_stop = be_close,
3057 .ndo_start_xmit = be_xmit,
a54769f5 3058 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3059 .ndo_set_mac_address = be_mac_addr_set,
3060 .ndo_change_mtu = be_change_mtu,
ab1594e9 3061 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3062 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3063 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3064 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3065 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3066 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3067 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3068 .ndo_get_vf_config = be_get_vf_config,
3069#ifdef CONFIG_NET_POLL_CONTROLLER
3070 .ndo_poll_controller = be_netpoll,
3071#endif
6b7c5b94
SP
3072};
3073
3074static void be_netdev_init(struct net_device *netdev)
3075{
3076 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3077 struct be_eq_obj *eqo;
3abcdeda 3078 int i;
6b7c5b94 3079
6332c8d3 3080 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
3081 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3082 NETIF_F_HW_VLAN_TX;
3083 if (be_multi_rxq(adapter))
3084 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3085
3086 netdev->features |= netdev->hw_features |
8b8ddc68 3087 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3088
eb8a50d9 3089 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3090 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3091
fbc13f01
AK
3092 netdev->priv_flags |= IFF_UNICAST_FLT;
3093
6b7c5b94
SP
3094 netdev->flags |= IFF_MULTICAST;
3095
c190e3c8
AK
3096 netif_set_gso_max_size(netdev, 65535);
3097
10ef9ab4 3098 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3099
3100 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3101
10ef9ab4
SP
3102 for_all_evt_queues(adapter, eqo, i)
3103 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3104}
3105
3106static void be_unmap_pci_bars(struct be_adapter *adapter)
3107{
8788fdc2
SP
3108 if (adapter->csr)
3109 iounmap(adapter->csr);
3110 if (adapter->db)
3111 iounmap(adapter->db);
6b7c5b94
SP
3112}
3113
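/* Maps the BARs each chip needs: Lancer uses only BAR 0 (doorbells);
 * on BE the PF also maps the CSR BAR 2, and the doorbell BAR is 4
 * except for a BE3 VF, where it is 0.
 */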
3114static int be_map_pci_bars(struct be_adapter *adapter)
3115{
3116 u8 __iomem *addr;
db3ea781 3117 int db_reg;
6b7c5b94 3118
fe6d2a38
SP
3119 if (lancer_chip(adapter)) {
3120 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3121 pci_resource_len(adapter->pdev, 0));
3122 if (addr == NULL)
3123 return -ENOMEM;
3124 adapter->db = addr;
3125 return 0;
3126 }
3127
ba343c77
SB
3128 if (be_physfn(adapter)) {
3129 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3130 pci_resource_len(adapter->pdev, 2));
3131 if (addr == NULL)
3132 return -ENOMEM;
3133 adapter->csr = addr;
3134 }
6b7c5b94 3135
ba343c77 3136 if (adapter->generation == BE_GEN2) {
ba343c77
SB
3137 db_reg = 4;
3138 } else {
ba343c77
SB
3139 if (be_physfn(adapter))
3140 db_reg = 4;
3141 else
3142 db_reg = 0;
3143 }
3144 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3145 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
3146 if (addr == NULL)
3147 goto pci_map_err;
ba343c77
SB
3148 adapter->db = addr;
3149
6b7c5b94
SP
3150 return 0;
3151pci_map_err:
3152 be_unmap_pci_bars(adapter);
3153 return -ENOMEM;
3154}
3155
3156
3157static void be_ctrl_cleanup(struct be_adapter *adapter)
3158{
8788fdc2 3159 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3160
3161 be_unmap_pci_bars(adapter);
3162
3163 if (mem->va)
2b7bcebf
IV
3164 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3165 mem->dma);
e7b909a6 3166
5b8821b7 3167 mem = &adapter->rx_filter;
e7b909a6 3168 if (mem->va)
2b7bcebf
IV
3169 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3170 mem->dma);
6b7c5b94
SP
3171}
3172
6b7c5b94
SP
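/* Maps the PCI BARs and allocates the DMA buffers used for FW cmds:
 * the mailbox (16-byte aligned, as required) and the rx-filter cmd
 * buffer.
 */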
3173static int be_ctrl_init(struct be_adapter *adapter)
3174{
8788fdc2
SP
3175 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3176 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3177 struct be_dma_mem *rx_filter = &adapter->rx_filter;
6b7c5b94 3178 int status;
6b7c5b94
SP
3179
3180 status = be_map_pci_bars(adapter);
3181 if (status)
e7b909a6 3182 goto done;
6b7c5b94
SP
3183
3184 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3185 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3186 mbox_mem_alloc->size,
3187 &mbox_mem_alloc->dma,
3188 GFP_KERNEL);
6b7c5b94 3189 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3190 status = -ENOMEM;
3191 goto unmap_pci_bars;
6b7c5b94
SP
3192 }
3193 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3194 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3195 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3196 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3197
5b8821b7
SP
3198 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3199 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3200 &rx_filter->dma, GFP_KERNEL);
3201 if (rx_filter->va == NULL) {
e7b909a6
SP
3202 status = -ENOMEM;
3203 goto free_mbox;
3204 }
5b8821b7 3205 memset(rx_filter->va, 0, rx_filter->size);
e7b909a6 3206
2984961c 3207 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3208 spin_lock_init(&adapter->mcc_lock);
3209 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3210
dd131e76 3211 init_completion(&adapter->flash_compl);
cf588477 3212 pci_save_state(adapter->pdev);
6b7c5b94 3213 return 0;
e7b909a6
SP
3214
3215free_mbox:
2b7bcebf
IV
3216 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3217 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3218
3219unmap_pci_bars:
3220 be_unmap_pci_bars(adapter);
3221
3222done:
3223 return status;
6b7c5b94
SP
3224}
3225
3226static void be_stats_cleanup(struct be_adapter *adapter)
3227{
3abcdeda 3228 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3229
3230 if (cmd->va)
2b7bcebf
IV
3231 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3232 cmd->va, cmd->dma);
6b7c5b94
SP
3233}
3234
3235static int be_stats_init(struct be_adapter *adapter)
3236{
3abcdeda 3237 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3238
005d5696 3239 if (adapter->generation == BE_GEN2) {
89a88ab8 3240 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3241 } else {
3242 if (lancer_chip(adapter))
3243 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3244 else
3245 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3246 }
2b7bcebf
IV
3247 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3248 GFP_KERNEL);
6b7c5b94
SP
3249 if (cmd->va == NULL)
3250 return -1;
d291b9af 3251 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3252 return 0;
3253}
3254
3255static void __devexit be_remove(struct pci_dev *pdev)
3256{
3257 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3258
6b7c5b94
SP
3259 if (!adapter)
3260 return;
3261
3262 unregister_netdev(adapter->netdev);
3263
5fb379ee
SP
3264 be_clear(adapter);
3265
6b7c5b94
SP
3266 be_stats_cleanup(adapter);
3267
3268 be_ctrl_cleanup(adapter);
3269
ba343c77
SB
3270 be_sriov_disable(adapter);
3271
6b7c5b94
SP
3272 pci_set_drvdata(pdev, NULL);
3273 pci_release_regions(pdev);
3274 pci_disable_device(pdev);
3275
3276 free_netdev(adapter->netdev);
3277}
3278
4762f6ce
AK
3279bool be_is_wol_supported(struct be_adapter *adapter)
3280{
3281 return (adapter->wol_cap & BE_WOL_CAP) &&
3282 !be_is_wol_excluded(adapter);
3283}
3284
2243e2e9 3285static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3286{
6b7c5b94
SP
3287 int status;
3288
3abcdeda
SP
3289 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3290 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3291 if (status)
3292 return status;
3293
752961a1 3294 if (adapter->function_mode & FLEX10_MODE)
456d9c96 3295 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
82903e4b
AK
3296 else
3297 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3298
fbc13f01
AK
3299 if (be_physfn(adapter))
3300 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3301 else
3302 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3303
3304 /* primary mac needs 1 pmac entry */
3305 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3306 sizeof(u32), GFP_KERNEL);
3307 if (!adapter->pmac_id)
3308 return -ENOMEM;
3309
9e1453c5
AK
3310 status = be_cmd_get_cntl_attributes(adapter);
3311 if (status)
3312 return status;
3313
4762f6ce
AK
3314 status = be_cmd_get_acpi_wol_cap(adapter);
3315 if (status) {
3316 /* in case of a failure to get wol capabilities
3317 * check the exclusion list to determine WOL capability */
3318 if (!be_is_wol_excluded(adapter))
3319 adapter->wol_cap |= BE_WOL_CAP;
3320 }
3321
3322 if (be_is_wol_supported(adapter))
3323 adapter->wol = true;
3324
2243e2e9 3325 return 0;
6b7c5b94
SP
3326}
3327
fe6d2a38
SP
3328static int be_dev_family_check(struct be_adapter *adapter)
3329{
3330 struct pci_dev *pdev = adapter->pdev;
3331 u32 sli_intf = 0, if_type;
3332
3333 switch (pdev->device) {
3334 case BE_DEVICE_ID1:
3335 case OC_DEVICE_ID1:
3336 adapter->generation = BE_GEN2;
3337 break;
3338 case BE_DEVICE_ID2:
3339 case OC_DEVICE_ID2:
ecedb6ae 3340 case OC_DEVICE_ID5:
fe6d2a38
SP
3341 adapter->generation = BE_GEN3;
3342 break;
3343 case OC_DEVICE_ID3:
12f4d0a8 3344 case OC_DEVICE_ID4:
fe6d2a38
SP
3345 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3346 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3347 SLI_INTF_IF_TYPE_SHIFT;
3348
3349 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3350 if_type != 0x02) {
3351 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3352 return -EINVAL;
3353 }
fe6d2a38
SP
3354 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3355 SLI_INTF_FAMILY_SHIFT);
3356 adapter->generation = BE_GEN3;
3357 break;
3358 default:
3359 adapter->generation = 0;
3360 }
3361 return 0;
3362}
3363
37eed1cb
PR
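/* Polls the SLIPORT status register for the ready bit, one second per
 * attempt, for up to 30 seconds.
 */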
3364static int lancer_wait_ready(struct be_adapter *adapter)
3365{
d8110f62 3366#define SLIPORT_READY_TIMEOUT 30
37eed1cb
PR
3367 u32 sliport_status;
3368 int status = 0, i;
3369
3370 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3371 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3372 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3373 break;
3374
d8110f62 3375 msleep(1000);
37eed1cb
PR
3376 }
3377
3378 if (i == SLIPORT_READY_TIMEOUT)
3379 status = -1;
3380
3381 return status;
3382}
3383
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

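/* Check the SLI port for a function error and, if found, attempt in-place
 * recovery: reset the port, re-create the HW queues and reopen the
 * interface.
 */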
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

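/* Periodic housekeeping: runs once a second to check for adapter errors,
 * refresh stats, replenish starved RX rings and adapt the EQ delay.
 */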
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

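/* PCI probe: bring up the controller, sync with the firmware's ready
 * state and register the net device.
 */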
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		 adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

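/* Suspend: optionally arm WoL, quiesce the interface and put the device
 * into a low-power PCI state.
 */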
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

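/* Resume: re-enable the device, re-initialize firmware state and reopen
 * the interface if it was running before suspend.
 */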
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

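/* EEH error handling: quiesce and release the device; a permanent
 * channel failure disconnects, otherwise a slot reset is requested.
 */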
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

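/* Validate the rx_frag_size module parameter before registering the PCI
 * driver; invalid values fall back to the 2048-byte default.
 */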
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);