be2net: fix wrong handling of be_setup() failure in be_probe()
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

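/* Set or clear the HOSTINTR bit in the MEMBAR interrupt-control register
 * via a read-modify-write over PCI config space; this is a no-op when the
 * bit already matches the requested state or an EEH error was detected.
 */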
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

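/* The doorbell helpers below notify the hw about queue activity: the
 * RQ/TXQ doorbells report how many new entries were posted, while the
 * EQ/CQ doorbells report how many events/completions were consumed and
 * whether to re-arm the queue.
 */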
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

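/* Replace the primary MAC: the new address is added as a pmac first and
 * the old pmac is deleted only after the add succeeds, so the interface
 * is never left without a programmed address.
 */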
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

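/* Fold a 16-bit hw counter that wraps at 65535 into a 32-bit accumulator:
 * the low word tracks the latest hw value while the high word counts the
 * wrap-arounds (each wrap adds 65536).
 */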
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

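/* If the priority in the skb's vlan tag is not enabled in the adapter's
 * priority bitmap, substitute the fw-recommended priority.
 */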
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

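/* DMA-map the skb head and frags and fill one WRB per mapped piece after
 * the header WRB; on a mapping error, every WRB mapped so far is unwound
 * and 0 is returned to signal failure.
 */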
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

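/* Count this adapter's VFs by walking the PCI bus and matching each
 * device's devfn against the SR-IOV capability's offset/stride
 * arithmetic; depending on vf_state, return either all VFs found or
 * only those currently assigned to a guest.
 */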
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
			dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

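/* Adaptive interrupt coalescing: once a second, recompute the EQ delay
 * from the observed rx pkts/sec rate, clamp it to the EQ's min/max, and
 * program it via be_cmd_modify_eqd() only when it changes.
 */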
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

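/* Return the page_info of the rx frag at frag_idx; the backing page is
 * DMA-unmapped when this frag is its last user, and the rxq's 'used'
 * count is decremented.
 */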
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

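/* Pop the next valid RX completion and convert it from the hw's v0/v1
 * layout into the common be_rx_compl_info; returns NULL when the CQ is
 * empty.
 */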
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

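/* Unmap the WRBs of the skb whose transmission completed at last_index
 * and free the skb; returns the number of WRBs (header wrb included)
 * that the skb consumed.
 */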
3c8def97
SP
1569static u16 be_tx_compl_process(struct be_adapter *adapter,
1570 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1571{
3c8def97 1572 struct be_queue_info *txq = &txo->q;
a73b796e 1573 struct be_eth_wrb *wrb;
3c8def97 1574 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1575 struct sk_buff *sent_skb;
ec43b1a6
SP
1576 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1577 bool unmap_skb_hdr = true;
6b7c5b94 1578
ec43b1a6 1579 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1580 BUG_ON(!sent_skb);
ec43b1a6
SP
1581 sent_skbs[txq->tail] = NULL;
1582
1583 /* skip header wrb */
a73b796e 1584 queue_tail_inc(txq);
6b7c5b94 1585
ec43b1a6 1586 do {
6b7c5b94 1587 cur_index = txq->tail;
a73b796e 1588 wrb = queue_tail_node(txq);
2b7bcebf
IV
1589 unmap_tx_frag(&adapter->pdev->dev, wrb,
1590 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1591 unmap_skb_hdr = false;
1592
6b7c5b94
SP
1593 num_wrbs++;
1594 queue_tail_inc(txq);
ec43b1a6 1595 } while (cur_index != last_index);
6b7c5b94 1596
6b7c5b94 1597 kfree_skb(sent_skb);
4d586b82 1598 return num_wrbs;
6b7c5b94
SP
1599}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}
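
/* A spurious interrupt (zero events) is handled by re-arming the EQ so a
 * later event can still raise an interrupt; when events are present the
 * EQ is left disarmed and NAPI polls until the backlog drains.
 */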

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
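
/* Illustration of the tail computation: with len=256, head=10 and
 * used=4, the oldest unconsumed buffer sits at (10 + 256 - 4) % 256 = 6,
 * which is where the reclaim loop starts walking.
 */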

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
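
/* The polling loop above bounds cleanup at ~200 iterations of mdelay(1),
 * i.e. roughly 200ms; anything still posted after that is reclaimed by
 * hand since its completion will never arrive (e.g. after an FW error).
 */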

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
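
/* The error labels above unwind in strict reverse order of creation
 * (MCC queue -> MCC CQ in hw -> CQ memory), the usual kernel goto-ladder
 * pattern that frees exactly what was set up before the failure.
 */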

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_want(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			 "Created only %d receive queues\n", adapter->num_rx_qs);

	return 0;
}
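
/* Example: with 4 MSI-X vectors num_rx_qs becomes 5 -- four RSS rings
 * plus the default (non-RSS) queue; with a single irq only the default
 * queue is created and RSS stays off.
 */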

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx only one EQ is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed.
	 * For other EQs the loop iterates only once.
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
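
/* be_poll() follows the standard NAPI contract: a return value below
 * 'budget' means the queues are drained, so napi_complete() runs and the
 * EQ is re-armed; returning 'budget' keeps the poller scheduled.
 */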

void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	u32 num = 0;
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !sriov_want(adapter) && be_physfn(adapter)) {
		num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
	}
	return num;
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
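
/* pci_enable_msix() (the old, pre-3.14 API used here) returns 0 on
 * success or a positive count of vectors actually available; the retry
 * above simply asks again with that smaller count before giving up.
 */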

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all DMA to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
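
/* RSS table fill, illustrated: with three RSS rings whose ids are A, B
 * and C, the 128-entry indirection table becomes A,B,C,A,B,C,... so
 * hashed flows spread evenly; the default queue is deliberately left out.
 */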

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"MAC address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
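
/* Only the last octet is bumped per VF (mac[5] += 1), so this scheme
 * assumes num_vfs stays small enough not to wrap within one octet; the
 * jhash-based seed itself comes from be_vf_eth_addr_generate().
 */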

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	be_msix_disable(adapter);
	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
	adapter->phy.forced_port_speed = -1;
}

static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF */
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos;
	u16 dev_num_vfs;

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (!lancer_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
	return 0;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	if (lancer_chip(adapter) && !be_physfn(adapter)) {
		en_flags = BE_IF_FLAGS_UNTAGGED |
			    BE_IF_FLAGS_BROADCAST |
			    BE_IF_FLAGS_MULTICAST;
		cap_flags = en_flags;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (adapter->generation != BE_GEN3)
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
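
/* The scan walks the UFI in 32-byte steps looking for the two-part
 * cookie ("*** SE FLASH DIRECTORY *** ") that marks the flash section
 * directory; a NULL return means no directory was found and the caller
 * treats the image as corrupt.
 */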

static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
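
/* The download loop streams the image through one DMA buffer in 32KB
 * chunks, advancing by the 'data_written' count reported back by the
 * firmware, and a final zero-length write commits the object; a short
 * write therefore just resumes at the right offset on the next pass.
 */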

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
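
/* hw_features lists what ethtool may toggle at runtime; 'features' (a
 * superset here, adding VLAN RX and filtering) is what is actually
 * enabled when the device is probed.
 */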

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3421
6b7c5b94
SP
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
	kfree(adapter->pmac_id);
}

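/* Allocate control-path resources: PCI BAR mappings plus DMA-coherent
 * buffers for the bootstrap mailbox and the RX-filter command.  The
 * mailbox must be 16-byte aligned, hence the over-allocation by 16
 * bytes and the PTR_ALIGN() below, e.g. (illustrative values):
 *
 *	PTR_ALIGN((void *)0x1004, 16) == (void *)0x1010
 */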
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id) {
		/* returning here directly would leak the mailbox and
		 * rx_filter buffers; unwind them like the other paths
		 */
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_rx_filter:
	dma_free_coherent(&adapter->pdev->dev, rx_filter->size,
			  rx_filter->va, rx_filter->dma);

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

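/* PCI remove: unwind be_probe() in reverse.  Detach RoCE, stop the
 * recovery worker and unregister the netdev before freeing queues,
 * stats and control structures and disabling the device.
 */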
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
	       !be_is_wol_excluded(adapter);
}

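/* Read the UART trace level from the firmware's extended FAT
 * capabilities; returns 0 if the query or the allocation fails.
 */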
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

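/* Pull the initial configuration from firmware: port number, function
 * mode and caps, VLAN and unicast-MAC limits, WOL capability, and the
 * firmware log level that seeds msg_enable.
 */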
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability
		 */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

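/* Classify the adapter from its PCI device id: set the BE2/BE3
 * generation, and for SLI-capable ids validate the SLI_INTF register
 * and record the interface type and SLI family.  Finally note whether
 * this function is a VF.
 */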
static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			!be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}

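/* Lancer SLIPORT recovery: wait for the port to report ready, then
 * rebuild the function from scratch.  The sequence, assuming the
 * interface was up, is essentially:
 *
 *	be_close() -> be_clear() -> be_setup() -> be_open()
 */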
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(&adapter->pdev->dev,
		 "Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}

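/* Periodic (1 second) housekeeping: reap MCC completions while
 * interrupts are off, refresh stats and die temperature, repost
 * starved RX queues and adapt EQ interrupt moderation.
 */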
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
}

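/* Probe path.  Every step has a matching unwind label at the bottom,
 * following the usual kernel error-handling pattern (illustrative):
 *
 *	status = do_step();
 *	if (status)
 *		goto undo_previous_step;
 *
 * Note that a be_setup() failure jumps to stats_clean rather than
 * unsetup, presumably because be_setup() undoes its own partial work
 * on failure and a second be_clear() must be avoided.
 */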
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
		 port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

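/* Legacy PM callbacks: suspend arms wake-on-LAN when configured and
 * tears the function down; resume re-enables the device, rebuilds the
 * function and restarts the recovery worker.
 */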
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

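/* PCI/EEH error recovery.  The AER/EEH core drives these callbacks in
 * order: error_detected() quiesces the function, slot_reset() re-enables
 * the device and waits for firmware, and resume() rebuilds and restarts.
 */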
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

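/* Module init: sanity-check the rx_frag_size parameter (the RX rings
 * only support 2K/4K/8K fragments) before registering the PCI driver,
 * e.g. "modprobe be2net rx_frag_size=4096".
 */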
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);