myri10ge: Fix typo of 'VMware' in comment.
[deliverable/linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
d2145cde 2 * Copyright (C) 2005 - 2011 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
6b7c5b94
SP
23
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

/* Number of PCI virtual functions to create at probe time
 * (read-only module parameter; 0 leaves SR-IOV disabled). */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each receive fragment buffer (read-only module parameter). */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI device IDs claimed by this driver. */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
/* NOTE(review): MODULE_DEVICE_TABLE(pci, be_dev_ids) also appears near the
 * top of the file; one of the two is likely redundant — confirm and drop one.
 */
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* NOTE(review): entry i presumably names the hw block for bit i of the
 * UE status low register — confirm against the hardware spec. */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* NOTE(review): entry i presumably names the hw block for bit i of the
 * UE status high register; trailing "Unknown" slots are reserved bits. */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 119
752961a1
SP
120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
6b7c5b94
SP
127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va)
2b7bcebf
IV
131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
6b7c5b94
SP
133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
2b7bcebf
IV
144 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
6b7c5b94
SP
146 if (!mem->va)
147 return -1;
148 memset(mem->va, 0, mem->size);
149 return 0;
150}
151
/* Enable/disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register, accessed through PCI config space.
 * Does nothing after an EEH error or when the bit already matches the
 * requested state. */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
173
/* Ring the RX-queue doorbell: tell the hw that 'posted' new receive buffers
 * were added to RX queue 'qid'. */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* descriptors must be visible in memory before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
183
/* Ring the TX-queue doorbell: tell the hw that 'posted' new work request
 * entries were added to TX queue 'qid'. */
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* WRBs must be visible in memory before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
193
/* Ring the event-queue doorbell for EQ 'qid': acknowledge 'num_popped'
 * consumed entries and optionally re-arm the EQ and/or clear the interrupt.
 * Skipped entirely after an EEH error. */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
213
/* Ring the completion-queue doorbell for CQ 'qid': acknowledge 'num_popped'
 * consumed entries and optionally re-arm the CQ.  Skipped after an EEH
 * error. */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
229
6b7c5b94
SP
/* ndo_set_mac_address handler.  Programs a new permanent MAC: adds the new
 * pmac entry first, then deletes the old one (snapshotted in pmac_id before
 * be_cmd_pmac_add overwrites adapter->pmac_id), so the port is never left
 * without a programmed address.  Returns 0 or a negative errno. */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;	/* old entry, deleted below */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* reprogram the hw only when the address actually changes */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
261
89a88ab8
AK
/* Copy the BE2 (v0 layout) hardware stats, fetched earlier into the stats
 * command buffer, into the driver's generic drv_stats block.  The buffer is
 * byte-swapped in place from LE first. */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 layout keeps jabber counters per-port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
310
/* Copy the BE3 (v1 layout) hardware stats, fetched earlier into the stats
 * command buffer, into the driver's generic drv_stats block.  The buffer is
 * byte-swapped in place from LE first. */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* unlike v0, v1 keeps jabber counters per-port in port_stats */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
005d5696
SX
/* Copy the Lancer per-physical-port stats, fetched earlier into the stats
 * command buffer, into the driver's generic drv_stats block.  Lancer keeps
 * 64-bit counters; only the low 32 bits (_lo fields) are consumed here. */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* both fifo-overflow stats map to the same Lancer counter */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 395
09c1c68f
SP
/* Fold a 16-bit hw counter 'val' (which wraps at 65535) into the 32-bit
 * software accumulator *acc: the low 16 bits of *acc hold the last hw
 * reading and the high 16 bits accumulate one count per observed wrap. */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	/* hw value went backwards => counter wrapped past 65535 */
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	/* single store so concurrent readers never see a torn value */
	ACCESS_ONCE(*acc) = newacc;
}
407
89a88ab8
AK
/* Parse the raw stats buffer returned by FW into drv_stats, dispatching on
 * chip generation (BE2 / BE3 / Lancer), then fold the per-RXQ 16-bit
 * no-fragments drop counters into their 32-bit software accumulators. */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
432
ab1594e9
SP
/* ndo_get_stats64 handler.  Sums per-queue packet/byte counters (each
 * snapshotted consistently via its u64_stats seqcount) and derives the
 * aggregate error counters from drv_stats.  Returns 'stats'. */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until the writer didn't update mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
498
ea172a01 499void be_link_status_update(struct be_adapter *adapter, u32 link_status)
6b7c5b94 500{
6b7c5b94
SP
501 struct net_device *netdev = adapter->netdev;
502
ea172a01
SP
503 /* when link status changes, link speed must be re-queried from card */
504 adapter->link_speed = -1;
505 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
506 netif_carrier_on(netdev);
507 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
508 } else {
509 netif_carrier_off(netdev);
510 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
6b7c5b94 511 }
6b7c5b94
SP
512}
513
/* Account one transmit request on TX object 'txo': 'wrb_cnt' WRBs used,
 * 'copied' bytes queued, 'gso_segs' segments (0 means a single packet),
 * and whether the queue was stopped.  Writers are serialized against
 * readers via the u64_stats seqcount. */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
528
/* Determine number of WRB entries needed to xmit data in an skb */
/* Counts one WRB for the linear head (if non-empty), one per page fragment,
 * and one for the header WRB.  On non-Lancer chips the total must be even,
 * so *dummy reports whether a padding WRB was added. */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
549
550static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
551{
552 wrb->frag_pa_hi = upper_32_bits(addr);
553 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
554 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
555}
556
1ded132d
AK
557static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
558 struct sk_buff *skb)
559{
560 u8 vlan_prio;
561 u16 vlan_tag;
562
563 vlan_tag = vlan_tx_tag_get(skb);
564 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
565 /* If vlan priority provided by OS is NOT in available bmap */
566 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
567 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
568 adapter->recommended_prio;
569
570 return vlan_tag;
571}
572
cc4ce020
SK
/* Build the TX header WRB for 'skb': programs CRC, LSO/checksum-offload,
 * VLAN-insertion and bookkeeping fields ('wrb_cnt' WRBs, 'len' payload
 * bytes) via the AMAP bit-field accessors. */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit checksum bits even for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
616
/* DMA-unmap the buffer described by one TX WRB.  The WRB is converted from
 * LE back to CPU order in place first; 'unmap_single' selects between the
 * single (linear head) and page (fragment) unmap calls.  Zero-length WRBs
 * (dummies / the header WRB) carry no mapping and are skipped. */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
6b7c5b94 633
/* DMA-map 'skb' and post its WRBs onto 'txq': a header WRB first, then one
 * WRB for the linear head (if any), one per page fragment, and an optional
 * zero-length dummy WRB.  Returns the number of payload bytes queued, or 0
 * after unwinding all mappings if any dma_map call failed. */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB, filled in last */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; unwind starts here */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* head was mapped with map_single */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* pad to an even WRB count (see wrb_cnt_for_skb) */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* walk the already-filled WRBs from map_head and unmap them;
	 * only the first one may have been a dma_map_single mapping */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
699
/* ndo_start_xmit handler: applies the BE vlan-checksum workaround, maps the
 * skb into TX WRBs, stops the subqueue when it cannot hold another maximal
 * request, and rings the TX doorbell.  Always returns NETDEV_TX_OK; the skb
 * is freed here on mapping failure. */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		/* insert the tag into the frame in software instead */
		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* mapping failed: rewind the queue head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
759
760static int be_change_mtu(struct net_device *netdev, int new_mtu)
761{
762 struct be_adapter *adapter = netdev_priv(netdev);
763 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
764 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
765 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
766 dev_info(&adapter->pdev->dev,
767 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
768 BE_MIN_MTU,
769 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
770 return -EINVAL;
771 }
772 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
773 netdev->mtu, new_mtu);
774 netdev->mtu = new_mtu;
775 return 0;
776}
777
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
/* When 'vf' is true, only programs vf_cfg[vf_num]'s single tag; otherwise
 * rebuilds the full PF vlan table (or enables vlan promiscuity when over
 * the limit).  Returns the be_cmd_vlan_config status. */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* too many vids: fall back to vlan promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
816
/* ndo_vlan_rx_add_vid handler: track the added vid and reprogram the hw
 * vlan table.  Only the physical function programs the hw; VFs merely
 * keep the vlans_added count in step. */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);

	return 0;
}
831
/* ndo_vlan_rx_kill_vid handler: untrack the vid and reprogram the hw vlan
 * table.  Only the physical function programs the hw; VFs merely keep the
 * vlans_added count in step. */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);

	return 0;
}
847
/* ndo_set_rx_mode handler: program the hw RX filter to match the netdev
 * flags — full promiscuity, all-multicast (also used when the configured
 * mc list exceeds BE_MAX_MC), or the exact multicast list. */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* vlan filtering was skipped while promiscuous; redo it */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
878
ba343c77
SB
879static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
880{
881 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 882 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
883 int status;
884
11ac75ed 885 if (!sriov_enabled(adapter))
ba343c77
SB
886 return -EPERM;
887
11ac75ed 888 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
889 return -EINVAL;
890
590c391d
PR
891 if (lancer_chip(adapter)) {
892 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
893 } else {
11ac75ed
SP
894 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
895 vf_cfg->pmac_id, vf + 1);
ba343c77 896
11ac75ed
SP
897 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
898 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
899 }
900
64600ea5 901 if (status)
ba343c77
SB
902 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
903 mac, vf);
64600ea5 904 else
11ac75ed 905 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
64600ea5 906
ba343c77
SB
907 return status;
908}
909
64600ea5
AK
910static int be_get_vf_config(struct net_device *netdev, int vf,
911 struct ifla_vf_info *vi)
912{
913 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 914 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 915
11ac75ed 916 if (!sriov_enabled(adapter))
64600ea5
AK
917 return -EPERM;
918
11ac75ed 919 if (vf >= adapter->num_vfs)
64600ea5
AK
920 return -EINVAL;
921
922 vi->vf = vf;
11ac75ed
SP
923 vi->tx_rate = vf_cfg->tx_rate;
924 vi->vlan = vf_cfg->vlan_tag;
64600ea5 925 vi->qos = 0;
11ac75ed 926 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
927
928 return 0;
929}
930
1da87b7f
AK
931static int be_set_vf_vlan(struct net_device *netdev,
932 int vf, u16 vlan, u8 qos)
933{
934 struct be_adapter *adapter = netdev_priv(netdev);
935 int status = 0;
936
11ac75ed 937 if (!sriov_enabled(adapter))
1da87b7f
AK
938 return -EPERM;
939
11ac75ed 940 if (vf >= adapter->num_vfs || vlan > 4095)
1da87b7f
AK
941 return -EINVAL;
942
943 if (vlan) {
11ac75ed 944 adapter->vf_cfg[vf].vlan_tag = vlan;
1da87b7f
AK
945 adapter->vlans_added++;
946 } else {
11ac75ed 947 adapter->vf_cfg[vf].vlan_tag = 0;
1da87b7f
AK
948 adapter->vlans_added--;
949 }
950
951 status = be_vid_config(adapter, true, vf);
952
953 if (status)
954 dev_info(&adapter->pdev->dev,
955 "VLAN %d config on VF %d failed\n", vlan, vf);
956 return status;
957}
958
e1d18735
AK
959static int be_set_vf_tx_rate(struct net_device *netdev,
960 int vf, int rate)
961{
962 struct be_adapter *adapter = netdev_priv(netdev);
963 int status = 0;
964
11ac75ed 965 if (!sriov_enabled(adapter))
e1d18735
AK
966 return -EPERM;
967
11ac75ed 968 if (vf >= adapter->num_vfs || rate < 0)
e1d18735
AK
969 return -EINVAL;
970
971 if (rate > 10000)
972 rate = 10000;
973
11ac75ed 974 adapter->vf_cfg[vf].tx_rate = rate;
856c4012 975 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
976
977 if (status)
978 dev_info(&adapter->pdev->dev,
979 "tx rate %d on VF %d failed\n", rate, vf);
980 return status;
981}
982
ac124ff9 983static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94 984{
ac124ff9
SP
985 struct be_eq_obj *rx_eq = &rxo->rx_eq;
986 struct be_rx_stats *stats = rx_stats(rxo);
4097f663 987 ulong now = jiffies;
ac124ff9 988 ulong delta = now - stats->rx_jiffies;
ab1594e9
SP
989 u64 pkts;
990 unsigned int start, eqd;
ac124ff9
SP
991
992 if (!rx_eq->enable_aic)
993 return;
6b7c5b94 994
4097f663 995 /* Wrapped around */
3abcdeda
SP
996 if (time_before(now, stats->rx_jiffies)) {
997 stats->rx_jiffies = now;
4097f663
SP
998 return;
999 }
6b7c5b94 1000
ac124ff9
SP
1001 /* Update once a second */
1002 if (delta < HZ)
6b7c5b94
SP
1003 return;
1004
ab1594e9
SP
1005 do {
1006 start = u64_stats_fetch_begin_bh(&stats->sync);
1007 pkts = stats->rx_pkts;
1008 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1009
68c3e5a7 1010 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
ab1594e9 1011 stats->rx_pkts_prev = pkts;
3abcdeda 1012 stats->rx_jiffies = now;
ac124ff9
SP
1013 eqd = stats->rx_pps / 110000;
1014 eqd = eqd << 3;
1015 if (eqd > rx_eq->max_eqd)
1016 eqd = rx_eq->max_eqd;
1017 if (eqd < rx_eq->min_eqd)
1018 eqd = rx_eq->min_eqd;
1019 if (eqd < 10)
1020 eqd = 0;
1021 if (eqd != rx_eq->cur_eqd) {
1022 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
1023 rx_eq->cur_eqd = eqd;
1024 }
6b7c5b94
SP
1025}
1026
3abcdeda 1027static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 1028 struct be_rx_compl_info *rxcp)
4097f663 1029{
ac124ff9 1030 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1031
ab1594e9 1032 u64_stats_update_begin(&stats->sync);
3abcdeda 1033 stats->rx_compl++;
2e588f84 1034 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1035 stats->rx_pkts++;
2e588f84 1036 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1037 stats->rx_mcast_pkts++;
2e588f84 1038 if (rxcp->err)
ac124ff9 1039 stats->rx_compl_err++;
ab1594e9 1040 u64_stats_update_end(&stats->sync);
4097f663
SP
1041}
1042
2e588f84 1043static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1044{
19fad86f
PR
1045 /* L4 checksum is not reliable for non TCP/UDP packets.
1046 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1047 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1048 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1049}
1050
6b7c5b94 1051static struct be_rx_page_info *
3abcdeda
SP
1052get_rx_page_info(struct be_adapter *adapter,
1053 struct be_rx_obj *rxo,
1054 u16 frag_idx)
6b7c5b94
SP
1055{
1056 struct be_rx_page_info *rx_page_info;
3abcdeda 1057 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1058
3abcdeda 1059 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1060 BUG_ON(!rx_page_info->page);
1061
205859a2 1062 if (rx_page_info->last_page_user) {
2b7bcebf
IV
1063 dma_unmap_page(&adapter->pdev->dev,
1064 dma_unmap_addr(rx_page_info, bus),
1065 adapter->big_page_size, DMA_FROM_DEVICE);
205859a2
AK
1066 rx_page_info->last_page_user = false;
1067 }
6b7c5b94
SP
1068
1069 atomic_dec(&rxq->used);
1070 return rx_page_info;
1071}
1072
1073/* Throwaway the data in the Rx completion */
1074static void be_rx_compl_discard(struct be_adapter *adapter,
3abcdeda 1075 struct be_rx_obj *rxo,
2e588f84 1076 struct be_rx_compl_info *rxcp)
6b7c5b94 1077{
3abcdeda 1078 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1079 struct be_rx_page_info *page_info;
2e588f84 1080 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1081
e80d9da6 1082 for (i = 0; i < num_rcvd; i++) {
2e588f84 1083 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
e80d9da6
PR
1084 put_page(page_info->page);
1085 memset(page_info, 0, sizeof(*page_info));
2e588f84 1086 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1087 }
1088}
1089
1090/*
1091 * skb_fill_rx_data forms a complete skb for an ether frame
1092 * indicated by rxcp.
1093 */
3abcdeda 1094static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
2e588f84 1095 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
6b7c5b94 1096{
3abcdeda 1097 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1098 struct be_rx_page_info *page_info;
2e588f84
SP
1099 u16 i, j;
1100 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1101 u8 *start;
6b7c5b94 1102
2e588f84 1103 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
6b7c5b94
SP
1104 start = page_address(page_info->page) + page_info->page_offset;
1105 prefetch(start);
1106
1107 /* Copy data in the first descriptor of this completion */
2e588f84 1108 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94
SP
1109
1110 /* Copy the header portion into skb_data */
2e588f84 1111 hdr_len = min(BE_HDR_LEN, curr_frag_len);
6b7c5b94
SP
1112 memcpy(skb->data, start, hdr_len);
1113 skb->len = curr_frag_len;
1114 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1115 /* Complete packet has now been moved to data */
1116 put_page(page_info->page);
1117 skb->data_len = 0;
1118 skb->tail += curr_frag_len;
1119 } else {
1120 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1121 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1122 skb_shinfo(skb)->frags[0].page_offset =
1123 page_info->page_offset + hdr_len;
9e903e08 1124 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
6b7c5b94 1125 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1126 skb->truesize += rx_frag_size;
6b7c5b94
SP
1127 skb->tail += hdr_len;
1128 }
205859a2 1129 page_info->page = NULL;
6b7c5b94 1130
2e588f84
SP
1131 if (rxcp->pkt_size <= rx_frag_size) {
1132 BUG_ON(rxcp->num_rcvd != 1);
1133 return;
6b7c5b94
SP
1134 }
1135
1136 /* More frags present for this completion */
2e588f84
SP
1137 index_inc(&rxcp->rxq_idx, rxq->len);
1138 remaining = rxcp->pkt_size - curr_frag_len;
1139 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1140 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1141 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1142
bd46cb6c
AK
1143 /* Coalesce all frags from the same physical page in one slot */
1144 if (page_info->page_offset == 0) {
1145 /* Fresh page */
1146 j++;
b061b39e 1147 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1148 skb_shinfo(skb)->frags[j].page_offset =
1149 page_info->page_offset;
9e903e08 1150 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1151 skb_shinfo(skb)->nr_frags++;
1152 } else {
1153 put_page(page_info->page);
1154 }
1155
9e903e08 1156 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1157 skb->len += curr_frag_len;
1158 skb->data_len += curr_frag_len;
bdb28a97 1159 skb->truesize += rx_frag_size;
2e588f84
SP
1160 remaining -= curr_frag_len;
1161 index_inc(&rxcp->rxq_idx, rxq->len);
205859a2 1162 page_info->page = NULL;
6b7c5b94 1163 }
bd46cb6c 1164 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1165}
1166
5be93b9a 1167/* Process the RX completion indicated by rxcp when GRO is disabled */
6b7c5b94 1168static void be_rx_compl_process(struct be_adapter *adapter,
3abcdeda 1169 struct be_rx_obj *rxo,
2e588f84 1170 struct be_rx_compl_info *rxcp)
6b7c5b94 1171{
6332c8d3 1172 struct net_device *netdev = adapter->netdev;
6b7c5b94 1173 struct sk_buff *skb;
89420424 1174
6332c8d3 1175 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
a058a632 1176 if (unlikely(!skb)) {
ac124ff9 1177 rx_stats(rxo)->rx_drops_no_skbs++;
3abcdeda 1178 be_rx_compl_discard(adapter, rxo, rxcp);
6b7c5b94
SP
1179 return;
1180 }
1181
2e588f84 1182 skb_fill_rx_data(adapter, rxo, skb, rxcp);
6b7c5b94 1183
6332c8d3 1184 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1185 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1186 else
1187 skb_checksum_none_assert(skb);
6b7c5b94 1188
6332c8d3 1189 skb->protocol = eth_type_trans(skb, netdev);
4b972914
AK
1190 if (adapter->netdev->features & NETIF_F_RXHASH)
1191 skb->rxhash = rxcp->rss_hash;
1192
6b7c5b94 1193
343e43c0 1194 if (rxcp->vlanf)
4c5102f9
AK
1195 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1196
1197 netif_receive_skb(skb);
6b7c5b94
SP
1198}
1199
5be93b9a
AK
1200/* Process the RX completion indicated by rxcp when GRO is enabled */
1201static void be_rx_compl_process_gro(struct be_adapter *adapter,
3abcdeda 1202 struct be_rx_obj *rxo,
2e588f84 1203 struct be_rx_compl_info *rxcp)
6b7c5b94
SP
1204{
1205 struct be_rx_page_info *page_info;
5be93b9a 1206 struct sk_buff *skb = NULL;
3abcdeda
SP
1207 struct be_queue_info *rxq = &rxo->q;
1208 struct be_eq_obj *eq_obj = &rxo->rx_eq;
2e588f84
SP
1209 u16 remaining, curr_frag_len;
1210 u16 i, j;
3968fa1e 1211
5be93b9a
AK
1212 skb = napi_get_frags(&eq_obj->napi);
1213 if (!skb) {
3abcdeda 1214 be_rx_compl_discard(adapter, rxo, rxcp);
5be93b9a
AK
1215 return;
1216 }
1217
2e588f84
SP
1218 remaining = rxcp->pkt_size;
1219 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1220 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
6b7c5b94
SP
1221
1222 curr_frag_len = min(remaining, rx_frag_size);
1223
bd46cb6c
AK
1224 /* Coalesce all frags from the same physical page in one slot */
1225 if (i == 0 || page_info->page_offset == 0) {
1226 /* First frag or Fresh page */
1227 j++;
b061b39e 1228 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1229 skb_shinfo(skb)->frags[j].page_offset =
1230 page_info->page_offset;
9e903e08 1231 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1232 } else {
1233 put_page(page_info->page);
1234 }
9e903e08 1235 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1236 skb->truesize += rx_frag_size;
bd46cb6c 1237 remaining -= curr_frag_len;
2e588f84 1238 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1239 memset(page_info, 0, sizeof(*page_info));
1240 }
bd46cb6c 1241 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1242
5be93b9a 1243 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1244 skb->len = rxcp->pkt_size;
1245 skb->data_len = rxcp->pkt_size;
5be93b9a 1246 skb->ip_summed = CHECKSUM_UNNECESSARY;
4b972914
AK
1247 if (adapter->netdev->features & NETIF_F_RXHASH)
1248 skb->rxhash = rxcp->rss_hash;
5be93b9a 1249
343e43c0 1250 if (rxcp->vlanf)
4c5102f9
AK
1251 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1252
1253 napi_gro_frags(&eq_obj->napi);
2e588f84
SP
1254}
1255
1256static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1257 struct be_eth_rx_compl *compl,
1258 struct be_rx_compl_info *rxcp)
1259{
1260 rxcp->pkt_size =
1261 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1262 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1263 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1264 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1265 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1266 rxcp->ip_csum =
1267 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1268 rxcp->l4_csum =
1269 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1270 rxcp->ipv6 =
1271 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1272 rxcp->rxq_idx =
1273 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1274 rxcp->num_rcvd =
1275 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1276 rxcp->pkt_type =
1277 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914
AK
1278 rxcp->rss_hash =
1279 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
15d72184
SP
1280 if (rxcp->vlanf) {
1281 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1282 compl);
1283 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1284 compl);
15d72184 1285 }
12004ae9 1286 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1287}
1288
1289static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1290 struct be_eth_rx_compl *compl,
1291 struct be_rx_compl_info *rxcp)
1292{
1293 rxcp->pkt_size =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1295 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1296 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1297 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1298 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1299 rxcp->ip_csum =
1300 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1301 rxcp->l4_csum =
1302 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1303 rxcp->ipv6 =
1304 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1305 rxcp->rxq_idx =
1306 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1307 rxcp->num_rcvd =
1308 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1309 rxcp->pkt_type =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914
AK
1311 rxcp->rss_hash =
1312 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
15d72184
SP
1313 if (rxcp->vlanf) {
1314 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1315 compl);
1316 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1317 compl);
15d72184 1318 }
12004ae9 1319 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
2e588f84
SP
1320}
1321
1322static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1323{
1324 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1325 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1326 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1327
2e588f84
SP
1328 /* For checking the valid bit it is Ok to use either definition as the
1329 * valid bit is at the same position in both v0 and v1 Rx compl */
1330 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1331 return NULL;
6b7c5b94 1332
2e588f84
SP
1333 rmb();
1334 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1335
2e588f84
SP
1336 if (adapter->be3_native)
1337 be_parse_rx_compl_v1(adapter, compl, rxcp);
1338 else
1339 be_parse_rx_compl_v0(adapter, compl, rxcp);
6b7c5b94 1340
15d72184
SP
1341 if (rxcp->vlanf) {
1342 /* vlanf could be wrongly set in some cards.
1343 * ignore if vtm is not set */
752961a1 1344 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1345 rxcp->vlanf = 0;
6b7c5b94 1346
15d72184 1347 if (!lancer_chip(adapter))
3c709f8f 1348 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1349
939cf306 1350 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1351 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1352 rxcp->vlanf = 0;
1353 }
2e588f84
SP
1354
1355 /* As the compl has been parsed, reset it; we wont touch it again */
1356 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1357
3abcdeda 1358 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1359 return rxcp;
1360}
1361
1829b086 1362static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1363{
6b7c5b94 1364 u32 order = get_order(size);
1829b086 1365
6b7c5b94 1366 if (order > 0)
1829b086
ED
1367 gfp |= __GFP_COMP;
1368 return alloc_pages(gfp, order);
6b7c5b94
SP
1369}
1370
1371/*
1372 * Allocate a page, split it to fragments of size rx_frag_size and post as
1373 * receive buffers to BE
1374 */
1829b086 1375static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1376{
3abcdeda
SP
1377 struct be_adapter *adapter = rxo->adapter;
1378 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
26d92f92 1379 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1380 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1381 struct page *pagep = NULL;
1382 struct be_eth_rx_d *rxd;
1383 u64 page_dmaaddr = 0, frag_dmaaddr;
1384 u32 posted, page_offset = 0;
1385
3abcdeda 1386 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1387 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1388 if (!pagep) {
1829b086 1389 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1390 if (unlikely(!pagep)) {
ac124ff9 1391 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1392 break;
1393 }
2b7bcebf
IV
1394 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1395 0, adapter->big_page_size,
1396 DMA_FROM_DEVICE);
6b7c5b94
SP
1397 page_info->page_offset = 0;
1398 } else {
1399 get_page(pagep);
1400 page_info->page_offset = page_offset + rx_frag_size;
1401 }
1402 page_offset = page_info->page_offset;
1403 page_info->page = pagep;
fac6da5b 1404 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1405 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1406
1407 rxd = queue_head_node(rxq);
1408 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1409 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1410
1411 /* Any space left in the current big page for another frag? */
1412 if ((page_offset + rx_frag_size + rx_frag_size) >
1413 adapter->big_page_size) {
1414 pagep = NULL;
1415 page_info->last_page_user = true;
1416 }
26d92f92
SP
1417
1418 prev_page_info = page_info;
1419 queue_head_inc(rxq);
6b7c5b94
SP
1420 page_info = &page_info_tbl[rxq->head];
1421 }
1422 if (pagep)
26d92f92 1423 prev_page_info->last_page_user = true;
6b7c5b94
SP
1424
1425 if (posted) {
6b7c5b94 1426 atomic_add(posted, &rxq->used);
8788fdc2 1427 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1428 } else if (atomic_read(&rxq->used) == 0) {
1429 /* Let be_worker replenish when memory is available */
3abcdeda 1430 rxo->rx_post_starved = true;
6b7c5b94 1431 }
6b7c5b94
SP
1432}
1433
5fb379ee 1434static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1435{
6b7c5b94
SP
1436 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1437
1438 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1439 return NULL;
1440
f3eb62d2 1441 rmb();
6b7c5b94
SP
1442 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1443
1444 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1445
1446 queue_tail_inc(tx_cq);
1447 return txcp;
1448}
1449
3c8def97
SP
1450static u16 be_tx_compl_process(struct be_adapter *adapter,
1451 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1452{
3c8def97 1453 struct be_queue_info *txq = &txo->q;
a73b796e 1454 struct be_eth_wrb *wrb;
3c8def97 1455 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1456 struct sk_buff *sent_skb;
ec43b1a6
SP
1457 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1458 bool unmap_skb_hdr = true;
6b7c5b94 1459
ec43b1a6 1460 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1461 BUG_ON(!sent_skb);
ec43b1a6
SP
1462 sent_skbs[txq->tail] = NULL;
1463
1464 /* skip header wrb */
a73b796e 1465 queue_tail_inc(txq);
6b7c5b94 1466
ec43b1a6 1467 do {
6b7c5b94 1468 cur_index = txq->tail;
a73b796e 1469 wrb = queue_tail_node(txq);
2b7bcebf
IV
1470 unmap_tx_frag(&adapter->pdev->dev, wrb,
1471 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1472 unmap_skb_hdr = false;
1473
6b7c5b94
SP
1474 num_wrbs++;
1475 queue_tail_inc(txq);
ec43b1a6 1476 } while (cur_index != last_index);
6b7c5b94 1477
6b7c5b94 1478 kfree_skb(sent_skb);
4d586b82 1479 return num_wrbs;
6b7c5b94
SP
1480}
1481
859b1e4e
SP
1482static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1483{
1484 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1485
1486 if (!eqe->evt)
1487 return NULL;
1488
f3eb62d2 1489 rmb();
859b1e4e
SP
1490 eqe->evt = le32_to_cpu(eqe->evt);
1491 queue_tail_inc(&eq_obj->q);
1492 return eqe;
1493}
1494
1495static int event_handle(struct be_adapter *adapter,
3c8def97
SP
1496 struct be_eq_obj *eq_obj,
1497 bool rearm)
859b1e4e
SP
1498{
1499 struct be_eq_entry *eqe;
1500 u16 num = 0;
1501
1502 while ((eqe = event_get(eq_obj)) != NULL) {
1503 eqe->evt = 0;
1504 num++;
1505 }
1506
1507 /* Deal with any spurious interrupts that come
1508 * without events
1509 */
3c8def97
SP
1510 if (!num)
1511 rearm = true;
1512
1513 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
859b1e4e
SP
1514 if (num)
1515 napi_schedule(&eq_obj->napi);
1516
1517 return num;
1518}
1519
1520/* Just read and notify events without processing them.
1521 * Used at the time of destroying event queues */
1522static void be_eq_clean(struct be_adapter *adapter,
1523 struct be_eq_obj *eq_obj)
1524{
1525 struct be_eq_entry *eqe;
1526 u16 num = 0;
1527
1528 while ((eqe = event_get(eq_obj)) != NULL) {
1529 eqe->evt = 0;
1530 num++;
1531 }
1532
1533 if (num)
1534 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1535}
1536
3abcdeda 1537static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94
SP
1538{
1539 struct be_rx_page_info *page_info;
3abcdeda
SP
1540 struct be_queue_info *rxq = &rxo->q;
1541 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1542 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1543 u16 tail;
1544
1545 /* First cleanup pending rx completions */
3abcdeda
SP
1546 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1547 be_rx_compl_discard(adapter, rxo, rxcp);
64642811 1548 be_cq_notify(adapter, rx_cq->id, false, 1);
6b7c5b94
SP
1549 }
1550
1551 /* Then free posted rx buffer that were not used */
1552 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1553 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
3abcdeda 1554 page_info = get_rx_page_info(adapter, rxo, tail);
6b7c5b94
SP
1555 put_page(page_info->page);
1556 memset(page_info, 0, sizeof(*page_info));
1557 }
1558 BUG_ON(atomic_read(&rxq->used));
482c9e79 1559 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1560}
1561
3c8def97
SP
1562static void be_tx_compl_clean(struct be_adapter *adapter,
1563 struct be_tx_obj *txo)
6b7c5b94 1564{
3c8def97
SP
1565 struct be_queue_info *tx_cq = &txo->cq;
1566 struct be_queue_info *txq = &txo->q;
a8e9179a 1567 struct be_eth_tx_compl *txcp;
4d586b82 1568 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
3c8def97 1569 struct sk_buff **sent_skbs = txo->sent_skb_list;
b03388d6
SP
1570 struct sk_buff *sent_skb;
1571 bool dummy_wrb;
a8e9179a
SP
1572
1573 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1574 do {
1575 while ((txcp = be_tx_compl_get(tx_cq))) {
1576 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1577 wrb_index, txcp);
3c8def97 1578 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
a8e9179a
SP
1579 cmpl++;
1580 }
1581 if (cmpl) {
1582 be_cq_notify(adapter, tx_cq->id, false, cmpl);
4d586b82 1583 atomic_sub(num_wrbs, &txq->used);
a8e9179a 1584 cmpl = 0;
4d586b82 1585 num_wrbs = 0;
a8e9179a
SP
1586 }
1587
1588 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1589 break;
1590
1591 mdelay(1);
1592 } while (true);
1593
1594 if (atomic_read(&txq->used))
1595 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1596 atomic_read(&txq->used));
b03388d6
SP
1597
1598 /* free posted tx for which compls will never arrive */
1599 while (atomic_read(&txq->used)) {
1600 sent_skb = sent_skbs[txq->tail];
1601 end_idx = txq->tail;
1602 index_adv(&end_idx,
fe6d2a38
SP
1603 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1604 txq->len);
3c8def97 1605 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
4d586b82 1606 atomic_sub(num_wrbs, &txq->used);
b03388d6 1607 }
6b7c5b94
SP
1608}
1609
5fb379ee
SP
1610static void be_mcc_queues_destroy(struct be_adapter *adapter)
1611{
1612 struct be_queue_info *q;
5fb379ee 1613
8788fdc2 1614 q = &adapter->mcc_obj.q;
5fb379ee 1615 if (q->created)
8788fdc2 1616 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1617 be_queue_free(adapter, q);
1618
8788fdc2 1619 q = &adapter->mcc_obj.cq;
5fb379ee 1620 if (q->created)
8788fdc2 1621 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1622 be_queue_free(adapter, q);
1623}
1624
1625/* Must be called only after TX qs are created as MCC shares TX EQ */
1626static int be_mcc_queues_create(struct be_adapter *adapter)
1627{
1628 struct be_queue_info *q, *cq;
5fb379ee
SP
1629
1630 /* Alloc MCC compl queue */
8788fdc2 1631 cq = &adapter->mcc_obj.cq;
5fb379ee 1632 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1633 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1634 goto err;
1635
1636 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1637 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
5fb379ee
SP
1638 goto mcc_cq_free;
1639
1640 /* Alloc MCC queue */
8788fdc2 1641 q = &adapter->mcc_obj.q;
5fb379ee
SP
1642 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1643 goto mcc_cq_destroy;
1644
1645 /* Ask BE to create MCC queue */
8788fdc2 1646 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1647 goto mcc_q_free;
1648
1649 return 0;
1650
1651mcc_q_free:
1652 be_queue_free(adapter, q);
1653mcc_cq_destroy:
8788fdc2 1654 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1655mcc_cq_free:
1656 be_queue_free(adapter, cq);
1657err:
1658 return -1;
1659}
1660
6b7c5b94
SP
1661static void be_tx_queues_destroy(struct be_adapter *adapter)
1662{
1663 struct be_queue_info *q;
3c8def97
SP
1664 struct be_tx_obj *txo;
1665 u8 i;
6b7c5b94 1666
3c8def97
SP
1667 for_all_tx_queues(adapter, txo, i) {
1668 q = &txo->q;
1669 if (q->created)
1670 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1671 be_queue_free(adapter, q);
6b7c5b94 1672
3c8def97
SP
1673 q = &txo->cq;
1674 if (q->created)
1675 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1676 be_queue_free(adapter, q);
1677 }
6b7c5b94 1678
859b1e4e
SP
1679 /* Clear any residual events */
1680 be_eq_clean(adapter, &adapter->tx_eq);
1681
6b7c5b94
SP
1682 q = &adapter->tx_eq.q;
1683 if (q->created)
8788fdc2 1684 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
6b7c5b94
SP
1685 be_queue_free(adapter, q);
1686}
1687
dafc0fe3
SP
1688static int be_num_txqs_want(struct be_adapter *adapter)
1689{
11ac75ed 1690 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
dafc0fe3
SP
1691 lancer_chip(adapter) || !be_physfn(adapter) ||
1692 adapter->generation == BE_GEN2)
1693 return 1;
1694 else
1695 return MAX_TX_QS;
1696}
1697
3c8def97 1698/* One TX event queue is shared by all TX compl qs */
6b7c5b94
SP
1699static int be_tx_queues_create(struct be_adapter *adapter)
1700{
1701 struct be_queue_info *eq, *q, *cq;
3c8def97
SP
1702 struct be_tx_obj *txo;
1703 u8 i;
6b7c5b94 1704
dafc0fe3 1705 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1706 if (adapter->num_tx_qs != MAX_TX_QS) {
1707 rtnl_lock();
dafc0fe3
SP
1708 netif_set_real_num_tx_queues(adapter->netdev,
1709 adapter->num_tx_qs);
3bb62f4f
PR
1710 rtnl_unlock();
1711 }
dafc0fe3 1712
6b7c5b94
SP
1713 adapter->tx_eq.max_eqd = 0;
1714 adapter->tx_eq.min_eqd = 0;
1715 adapter->tx_eq.cur_eqd = 96;
1716 adapter->tx_eq.enable_aic = false;
3c8def97 1717
6b7c5b94 1718 eq = &adapter->tx_eq.q;
3c8def97
SP
1719 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1720 sizeof(struct be_eq_entry)))
6b7c5b94
SP
1721 return -1;
1722
8788fdc2 1723 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
3c8def97 1724 goto err;
ecd62107 1725 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
fe6d2a38 1726
3c8def97
SP
1727 for_all_tx_queues(adapter, txo, i) {
1728 cq = &txo->cq;
1729 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
6b7c5b94 1730 sizeof(struct be_eth_tx_compl)))
3c8def97 1731 goto err;
6b7c5b94 1732
3c8def97
SP
1733 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1734 goto err;
6b7c5b94 1735
3c8def97
SP
1736 q = &txo->q;
1737 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1738 sizeof(struct be_eth_wrb)))
1739 goto err;
3c8def97 1740 }
6b7c5b94
SP
1741 return 0;
1742
3c8def97
SP
1743err:
1744 be_tx_queues_destroy(adapter);
6b7c5b94
SP
1745 return -1;
1746}
1747
1748static void be_rx_queues_destroy(struct be_adapter *adapter)
1749{
1750 struct be_queue_info *q;
3abcdeda
SP
1751 struct be_rx_obj *rxo;
1752 int i;
1753
1754 for_all_rx_queues(adapter, rxo, i) {
482c9e79 1755 be_queue_free(adapter, &rxo->q);
3abcdeda
SP
1756
1757 q = &rxo->cq;
1758 if (q->created)
1759 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1760 be_queue_free(adapter, q);
1761
3abcdeda 1762 q = &rxo->rx_eq.q;
482c9e79 1763 if (q->created)
3abcdeda 1764 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
3abcdeda 1765 be_queue_free(adapter, q);
6b7c5b94 1766 }
6b7c5b94
SP
1767}
1768
ac6a0c4a
SP
1769static u32 be_num_rxqs_want(struct be_adapter *adapter)
1770{
c814fd36 1771 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
11ac75ed
SP
1772 !sriov_enabled(adapter) && be_physfn(adapter) &&
1773 !be_is_mc(adapter)) {
ac6a0c4a
SP
1774 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1775 } else {
1776 dev_warn(&adapter->pdev->dev,
1777 "No support for multiple RX queues\n");
1778 return 1;
1779 }
1780}
1781
6b7c5b94
SP
/* Allocate and create the per-RX-queue EQ and CQ objects, and allocate
 * (but do not yet create in hw) the RX rings themselves; the RX queues
 * are created later in be_open().
 * The number of RX queues is capped by the available MSI-X vectors
 * (one vector is reserved for TX/MCC).
 * Returns 0 on success; on any failure all partially-created RX
 * queues are torn down and -1 is returned.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		/* adaptive interrupt coalescing on RX event queues */
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
6b7c5b94 1839
fe6d2a38 1840static bool event_peek(struct be_eq_obj *eq_obj)
b628bde2 1841{
fe6d2a38
SP
1842 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1843 if (!eqe->evt)
1844 return false;
1845 else
1846 return true;
b628bde2
SP
1847}
1848
6b7c5b94
SP
/* Legacy (INTx) interrupt handler; also used on Lancer.
 * On Lancer there is no CSR interrupt-status register, so each EQ is
 * peeked directly and serviced if it has pending events.
 * On BE2/BE3 the CEV_ISR0 register is read and only the EQs whose bit
 * is set are serviced.
 * Returns IRQ_NONE when the interrupt was not ours, so a shared line
 * can be passed on to other handlers.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		/* ISR word for this function is selected by the TX EQ id */
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1883
1884static irqreturn_t be_msix_rx(int irq, void *dev)
1885{
3abcdeda
SP
1886 struct be_rx_obj *rxo = dev;
1887 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1888
3c8def97 1889 event_handle(adapter, &rxo->rx_eq, true);
6b7c5b94
SP
1890
1891 return IRQ_HANDLED;
1892}
1893
5fb379ee 1894static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
6b7c5b94
SP
1895{
1896 struct be_adapter *adapter = dev;
1897
3c8def97 1898 event_handle(adapter, &adapter->tx_eq, false);
6b7c5b94
SP
1899
1900 return IRQ_HANDLED;
1901}
1902
2e588f84 1903static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 1904{
2e588f84 1905 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
1906}
1907
49b05221 1908static int be_poll_rx(struct napi_struct *napi, int budget)
6b7c5b94
SP
1909{
1910 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
3abcdeda
SP
1911 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1912 struct be_adapter *adapter = rxo->adapter;
1913 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1914 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1915 u32 work_done;
1916
ac124ff9 1917 rx_stats(rxo)->rx_polls++;
6b7c5b94 1918 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1919 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1920 if (!rxcp)
1921 break;
1922
12004ae9
SP
1923 /* Is it a flush compl that has no data */
1924 if (unlikely(rxcp->num_rcvd == 0))
1925 goto loop_continue;
1926
1927 /* Discard compl with partial DMA Lancer B0 */
1928 if (unlikely(!rxcp->pkt_size)) {
1929 be_rx_compl_discard(adapter, rxo, rxcp);
1930 goto loop_continue;
1931 }
1932
1933 /* On BE drop pkts that arrive due to imperfect filtering in
1934 * promiscuous mode on some skews
1935 */
1936 if (unlikely(rxcp->port != adapter->port_num &&
1937 !lancer_chip(adapter))) {
009dd872 1938 be_rx_compl_discard(adapter, rxo, rxcp);
12004ae9 1939 goto loop_continue;
64642811 1940 }
009dd872 1941
12004ae9
SP
1942 if (do_gro(rxcp))
1943 be_rx_compl_process_gro(adapter, rxo, rxcp);
1944 else
1945 be_rx_compl_process(adapter, rxo, rxcp);
1946loop_continue:
2e588f84 1947 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
1948 }
1949
9372cacb
PR
1950 be_cq_notify(adapter, rx_cq->id, false, work_done);
1951
6b7c5b94 1952 /* Refill the queue */
857c9905 1953 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1954 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94
SP
1955
1956 /* All consumed */
1957 if (work_done < budget) {
1958 napi_complete(napi);
9372cacb
PR
1959 /* Arm CQ */
1960 be_cq_notify(adapter, rx_cq->id, true, 0);
6b7c5b94
SP
1961 }
1962 return work_done;
1963}
1964
f31e50a8
SP
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	/* Drain every TX CQ completely, freeing the completed wrbs */
	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	/* Drain MCC completions on the same EQ */
	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Re-arm the TX/MCC EQ */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
2018}
2019
d053de91 2020void be_detect_dump_ue(struct be_adapter *adapter)
7c185276 2021{
e1cfb67a
PR
2022 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2023 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2024 u32 i;
2025
72f02485
SP
2026 if (adapter->eeh_err || adapter->ue_detected)
2027 return;
2028
e1cfb67a
PR
2029 if (lancer_chip(adapter)) {
2030 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2031 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2032 sliport_err1 = ioread32(adapter->db +
2033 SLIPORT_ERROR1_OFFSET);
2034 sliport_err2 = ioread32(adapter->db +
2035 SLIPORT_ERROR2_OFFSET);
2036 }
2037 } else {
2038 pci_read_config_dword(adapter->pdev,
2039 PCICFG_UE_STATUS_LOW, &ue_lo);
2040 pci_read_config_dword(adapter->pdev,
2041 PCICFG_UE_STATUS_HIGH, &ue_hi);
2042 pci_read_config_dword(adapter->pdev,
2043 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2044 pci_read_config_dword(adapter->pdev,
2045 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2046
2047 ue_lo = (ue_lo & (~ue_lo_mask));
2048 ue_hi = (ue_hi & (~ue_hi_mask));
2049 }
7c185276 2050
e1cfb67a
PR
2051 if (ue_lo || ue_hi ||
2052 sliport_status & SLIPORT_STATUS_ERR_MASK) {
d053de91 2053 adapter->ue_detected = true;
7acc2087 2054 adapter->eeh_err = true;
434b3648
SP
2055 dev_err(&adapter->pdev->dev,
2056 "Unrecoverable error in the card\n");
d053de91
AK
2057 }
2058
e1cfb67a
PR
2059 if (ue_lo) {
2060 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2061 if (ue_lo & 1)
7c185276
AK
2062 dev_err(&adapter->pdev->dev,
2063 "UE: %s bit set\n", ue_status_low_desc[i]);
2064 }
2065 }
e1cfb67a
PR
2066 if (ue_hi) {
2067 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2068 if (ue_hi & 1)
7c185276
AK
2069 dev_err(&adapter->pdev->dev,
2070 "UE: %s bit set\n", ue_status_hi_desc[i]);
2071 }
2072 }
2073
e1cfb67a
PR
2074 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2075 dev_err(&adapter->pdev->dev,
2076 "sliport status 0x%x\n", sliport_status);
2077 dev_err(&adapter->pdev->dev,
2078 "sliport error1 0x%x\n", sliport_err1);
2079 dev_err(&adapter->pdev->dev,
2080 "sliport error2 0x%x\n", sliport_err2);
2081 }
7c185276
AK
2082}
2083
8d56ff11
SP
2084static void be_msix_disable(struct be_adapter *adapter)
2085{
ac6a0c4a 2086 if (msix_enabled(adapter)) {
8d56ff11 2087 pci_disable_msix(adapter->pdev);
ac6a0c4a 2088 adapter->num_msix_vec = 0;
3abcdeda
SP
2089 }
2090}
2091
6b7c5b94
SP
/* Try to enable MSI-X with one vector per wanted RX queue plus one for
 * TX/MCC.  A positive return from pci_enable_msix() is the number of
 * vectors the device could grant; retry with that count as long as it
 * covers the minimum of one RX + one TX vector.
 * On success adapter->num_msix_vec is set; on failure it stays 0 so the
 * caller falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* retry with the number of vectors the device can grant */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2116
f9449ab7 2117static int be_sriov_enable(struct be_adapter *adapter)
ba343c77 2118{
344dbf10 2119 be_check_sriov_fn_type(adapter);
11ac75ed 2120
6dedec81 2121#ifdef CONFIG_PCI_IOV
ba343c77 2122 if (be_physfn(adapter) && num_vfs) {
81be8f0a 2123 int status, pos;
11ac75ed 2124 u16 dev_vfs;
81be8f0a
AK
2125
2126 pos = pci_find_ext_capability(adapter->pdev,
2127 PCI_EXT_CAP_ID_SRIOV);
2128 pci_read_config_word(adapter->pdev,
11ac75ed 2129 pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
81be8f0a 2130
11ac75ed
SP
2131 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2132 if (adapter->num_vfs != num_vfs)
81be8f0a 2133 dev_info(&adapter->pdev->dev,
11ac75ed
SP
2134 "Device supports %d VFs and not %d\n",
2135 adapter->num_vfs, num_vfs);
6dedec81 2136
11ac75ed
SP
2137 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2138 if (status)
2139 adapter->num_vfs = 0;
f9449ab7 2140
11ac75ed 2141 if (adapter->num_vfs) {
f9449ab7
SP
2142 adapter->vf_cfg = kcalloc(num_vfs,
2143 sizeof(struct be_vf_cfg),
2144 GFP_KERNEL);
2145 if (!adapter->vf_cfg)
2146 return -ENOMEM;
2147 }
ba343c77
SB
2148 }
2149#endif
f9449ab7 2150 return 0;
ba343c77
SB
2151}
2152
2153static void be_sriov_disable(struct be_adapter *adapter)
2154{
2155#ifdef CONFIG_PCI_IOV
11ac75ed 2156 if (sriov_enabled(adapter)) {
ba343c77 2157 pci_disable_sriov(adapter->pdev);
f9449ab7 2158 kfree(adapter->vf_cfg);
11ac75ed 2159 adapter->num_vfs = 0;
ba343c77
SB
2160 }
2161#endif
2162}
2163
fe6d2a38
SP
2164static inline int be_msix_vec_get(struct be_adapter *adapter,
2165 struct be_eq_obj *eq_obj)
6b7c5b94 2166{
ecd62107 2167 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
2168}
2169
b628bde2
SP
2170static int be_request_irq(struct be_adapter *adapter,
2171 struct be_eq_obj *eq_obj,
3abcdeda 2172 void *handler, char *desc, void *context)
6b7c5b94
SP
2173{
2174 struct net_device *netdev = adapter->netdev;
b628bde2
SP
2175 int vec;
2176
2177 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 2178 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 2179 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
2180}
2181
3abcdeda
SP
/* Release the MSI-X vector that was requested for this EQ. */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
6b7c5b94 2188
b628bde2
SP
/* Request one IRQ for the shared TX/MCC EQ and one per RX EQ.
 * On failure, unwinds every IRQ requested so far and disables MSI-X
 * so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* free the RX IRQs requested before the one that failed */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2222
/* Register interrupts: MSI-X when enabled, otherwise fall back to a
 * shared INTx line.  INTx is not supported for VFs, so a VF returns the
 * MSI-X registration error instead of falling back.
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2250
/* Undo be_irq_register(): free either the INTx line or every MSI-X
 * vector (TX/MCC plus one per RX queue).  No-op if nothing registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2275
482c9e79
SP
/* Destroy the hw RX rings (created in be_open()) and drain any residual
 * completions/events, leaving the queue memory allocated for reuse.
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2300
889cd4b2
SP
/* ndo_stop handler: quiesce the device in the reverse order of be_open().
 * Disables async MCC and (on BE) device interrupts, stops NAPI, makes
 * sure no IRQ handler is still running, drains pending TX completions
 * so all tx skbs are freed, then clears the RX rings.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* un-arm the CQs so no further notifications arrive */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* Wait for any in-flight interrupt handlers to finish */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2349
/* Create the hw RX rings (queue 0 is the default non-RSS queue, the
 * rest are RSS-enabled), program the 128-entry RSS indirection table
 * with the rss_ids of the RSS queues, then post the initial RX buffers
 * and enable NAPI on each queue.
 * Returns 0 on success or the first firmware-command error.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table by cycling over the RSS
		 * queues' rss_ids */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);

		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2386
6b7c5b94
SP
/* ndo_open handler: create/fill the RX rings, enable NAPI, register
 * interrupts, enable device interrupts (BE only), arm the event and
 * completion queues, then enable async MCC processing.
 * On any failure be_close() is called to unwind and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2420
71d8d1b5
AK
/* Enable or disable Wake-on-LAN (magic packet).
 * When enabling: set the PM control bits in PCI config space, program
 * the magic-WoL filter with the netdev's MAC and enable PCI wake for
 * D3hot/D3cold.  When disabling: clear the filter (zero MAC) and the
 * wake enables.  Uses a DMA-coherent buffer for the firmware command.
 * Returns 0 on success, -1 if the DMA buffer cannot be allocated, or
 * a PCI/firmware error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* a zero MAC disables the magic-packet filter */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2459
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the VF MAC via the mac-list; BEx adds a
		 * pmac on the VF's interface */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next MAC in sequence */
		mac[5] += 1;
	}
	return status;
}
2494
f9449ab7 2495static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2496{
11ac75ed 2497 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2498 u32 vf;
2499
11ac75ed 2500 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2501 if (lancer_chip(adapter))
2502 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2503 else
11ac75ed
SP
2504 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2505 vf_cfg->pmac_id, vf + 1);
f9449ab7 2506
11ac75ed
SP
2507 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2508 }
6d87f5c3
AK
2509}
2510
a54769f5
SP
/* Tear down everything be_setup() created: VF provisioning (if any),
 * the PF interface, and the MCC/RX/TX queues; finally tell fw we are
 * done firing commands.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2526
30128031
SP
2527static void be_vf_setup_init(struct be_adapter *adapter)
2528{
11ac75ed 2529 struct be_vf_cfg *vf_cfg;
30128031
SP
2530 int vf;
2531
11ac75ed
SP
2532 for_all_vfs(adapter, vf_cfg, vf) {
2533 vf_cfg->if_handle = -1;
2534 vf_cfg->pmac_id = -1;
30128031
SP
2535 }
2536}
2537
f9449ab7
SP
/* Provision each enabled VF: create its interface (untagged/broadcast/
 * multicast only), program its MAC address, and record its link speed
 * as the tx_rate (in multiples of 10 Mbps).
 * Returns 0 on success or the first firmware-command error; partial
 * state is left for be_clear()/be_vf_clear() to unwind.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2571
30128031
SP
2572static void be_setup_init(struct be_adapter *adapter)
2573{
2574 adapter->vlan_prio_bmap = 0xff;
2575 adapter->link_speed = -1;
2576 adapter->if_handle = -1;
2577 adapter->be3_native = false;
2578 adapter->promiscuous = false;
2579 adapter->eq_next_idx = 0;
2580}
2581
590c391d
PR
2582static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2583{
2584 u32 pmac_id;
2585 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2586 if (status != 0)
2587 goto do_none;
2588 status = be_cmd_mac_addr_query(adapter, mac,
2589 MAC_ADDRESS_TYPE_NETWORK,
2590 false, adapter->if_handle, pmac_id);
2591 if (status != 0)
2592 goto do_none;
2593 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2594 &adapter->pmac_id, 0);
2595do_none:
2596 return status;
2597}
2598
5fb379ee
SP
/* Main device initialization: create TX/RX/MCC queues, query the
 * permanent MAC, create the interface (with RSS if the function
 * supports it), create the hw TX rings, fix up the VF MAC, configure
 * vlans/rx-mode/flow-control, and finally provision VFs when SR-IOV
 * is enabled.  On failure (except the one noted below) everything is
 * unwound through be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status, i;
	u8 mac[ETH_ALEN];
	struct be_tx_obj *txo;

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0, 0);
	if (status)
		/* NOTE(review): this path returns without be_clear()
		 * although the tx/rx/mcc queues were created above —
		 * looks like a leak; confirm and consider goto err. */
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	for_all_tx_queues(adapter, txo, i) {
		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			goto err;
	}

	/* The VF's permanent mac queried from card is incorrect.
	 * For BEx: Query the mac configued by the PF using if_handle
	 * For Lancer: Get and use mac_list to obtain mac address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_configure_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: It is legal for this cmd to fail on VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		/* For Lancer: It is legal for this cmd to fail on VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (sriov_enabled(adapter)) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 2704
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with interrupts unavailable, service the TX/MCC EQ and
 * every RX EQ directly so netconsole and friends make progress.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rxo, i)
		event_handle(adapter, &rxo->rx_eq, true);
}
#endif
2717
84517482 2718#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2719static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2720 const u8 *p, u32 img_start, int image_size,
2721 int hdr_size)
fa9a6fed
SB
2722{
2723 u32 crc_offset;
2724 u8 flashed_crc[4];
2725 int status;
3f0d4560
AK
2726
2727 crc_offset = hdr_size + img_start + image_size - 4;
2728
fa9a6fed 2729 p += crc_offset;
3f0d4560
AK
2730
2731 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2732 (image_size - 4));
fa9a6fed
SB
2733 if (status) {
2734 dev_err(&adapter->pdev->dev,
2735 "could not get crc from flash, not flashing redboot\n");
2736 return false;
2737 }
2738
2739 /*update redboot only if crc does not match*/
2740 if (!memcmp(flashed_crc, p, 4))
2741 return false;
2742 else
2743 return true;
fa9a6fed
SB
2744}
2745
306f1348
SP
2746static bool phy_flashing_required(struct be_adapter *adapter)
2747{
2748 int status = 0;
2749 struct be_phy_info phy_info;
2750
2751 status = be_cmd_get_phy_info(adapter, &phy_info);
2752 if (status)
2753 return false;
2754 if ((phy_info.phy_type == TN_8022) &&
2755 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2756 return true;
2757 }
2758 return false;
2759}
2760
3f0d4560 2761static int be_flash_data(struct be_adapter *adapter,
84517482 2762 const struct firmware *fw,
3f0d4560
AK
2763 struct be_dma_mem *flash_cmd, int num_of_images)
2764
84517482 2765{
3f0d4560
AK
2766 int status = 0, i, filehdr_size = 0;
2767 u32 total_bytes = 0, flash_op;
84517482
AK
2768 int num_bytes;
2769 const u8 *p = fw->data;
2770 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2771 const struct flash_comp *pflashcomp;
9fe96934 2772 int num_comp;
3f0d4560 2773
306f1348 2774 static const struct flash_comp gen3_flash_types[10] = {
3f0d4560
AK
2775 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2776 FLASH_IMAGE_MAX_SIZE_g3},
2777 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2778 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2779 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2780 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2781 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2782 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2783 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2784 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2785 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2786 FLASH_IMAGE_MAX_SIZE_g3},
2787 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2788 FLASH_IMAGE_MAX_SIZE_g3},
2789 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2790 FLASH_IMAGE_MAX_SIZE_g3},
2791 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
306f1348
SP
2792 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2793 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2794 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
3f0d4560 2795 };
215faf9c 2796 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2797 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2798 FLASH_IMAGE_MAX_SIZE_g2},
2799 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2800 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2801 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2802 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2803 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2804 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2805 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2806 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2807 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2808 FLASH_IMAGE_MAX_SIZE_g2},
2809 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2810 FLASH_IMAGE_MAX_SIZE_g2},
2811 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2812 FLASH_IMAGE_MAX_SIZE_g2}
2813 };
2814
2815 if (adapter->generation == BE_GEN3) {
2816 pflashcomp = gen3_flash_types;
2817 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2818 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2819 } else {
2820 pflashcomp = gen2_flash_types;
2821 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2822 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2823 }
9fe96934
SB
2824 for (i = 0; i < num_comp; i++) {
2825 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2826 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2827 continue;
306f1348
SP
2828 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2829 if (!phy_flashing_required(adapter))
2830 continue;
2831 }
3f0d4560
AK
2832 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2833 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2834 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2835 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2836 continue;
2837 p = fw->data;
2838 p += filehdr_size + pflashcomp[i].offset
2839 + (num_of_images * sizeof(struct image_hdr));
306f1348
SP
2840 if (p + pflashcomp[i].size > fw->data + fw->size)
2841 return -1;
2842 total_bytes = pflashcomp[i].size;
3f0d4560
AK
2843 while (total_bytes) {
2844 if (total_bytes > 32*1024)
2845 num_bytes = 32*1024;
2846 else
2847 num_bytes = total_bytes;
2848 total_bytes -= num_bytes;
306f1348
SP
2849 if (!total_bytes) {
2850 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2851 flash_op = FLASHROM_OPER_PHY_FLASH;
2852 else
2853 flash_op = FLASHROM_OPER_FLASH;
2854 } else {
2855 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2856 flash_op = FLASHROM_OPER_PHY_SAVE;
2857 else
2858 flash_op = FLASHROM_OPER_SAVE;
2859 }
3f0d4560
AK
2860 memcpy(req->params.data_buf, p, num_bytes);
2861 p += num_bytes;
2862 status = be_cmd_write_flashrom(adapter, flash_cmd,
2863 pflashcomp[i].optype, flash_op, num_bytes);
2864 if (status) {
306f1348
SP
2865 if ((status == ILLEGAL_IOCTL_REQ) &&
2866 (pflashcomp[i].optype ==
2867 IMG_TYPE_PHY_FW))
2868 break;
3f0d4560
AK
2869 dev_err(&adapter->pdev->dev,
2870 "cmd to write to flash rom failed.\n");
2871 return -1;
2872 }
84517482 2873 }
84517482 2874 }
84517482
AK
2875 return 0;
2876}
2877
3f0d4560
AK
2878static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2879{
2880 if (fhdr == NULL)
2881 return 0;
2882 if (fhdr->build[0] == '3')
2883 return BE_GEN3;
2884 else if (fhdr->build[0] == '2')
2885 return BE_GEN2;
2886 else
2887 return 0;
2888}
2889
485bf569
SN
/* Flash a firmware image onto a Lancer chip.
 *
 * The image is streamed to the "/prg" flash object in 32KB chunks via
 * WRITE_OBJECT commands, then committed with a final zero-length write.
 * Returns 0 on success or a negative/firmware status code on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* The WRITE_OBJECT command requires a 4-byte aligned payload */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image chunk-by-chunk; advance by the number of bytes
	 * the firmware reports as actually written, not by chunk_size.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset tells the firmware the download is complete.
		 */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2968
/* Flash a UFI firmware image onto a BE2/BE3 adapter.
 *
 * Validates that the UFI's generation (from its header build string)
 * matches the adapter generation, then flashes each embedded image via
 * be_flash_data(). Returns 0 on success, negative/-1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the flashrom command header plus a 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		/* Gen3 UFIs carry multiple images; flash only those with
		 * imageid == 1 (the NIC/default function images).
		 */
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		/* UFI generation does not match the adapter generation */
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3024
3025int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3026{
3027 const struct firmware *fw;
3028 int status;
3029
3030 if (!netif_running(adapter->netdev)) {
3031 dev_err(&adapter->pdev->dev,
3032 "Firmware load not allowed (interface is down)\n");
3033 return -1;
3034 }
3035
3036 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3037 if (status)
3038 goto fw_exit;
3039
3040 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3041
3042 if (lancer_chip(adapter))
3043 status = lancer_fw_download(adapter, fw);
3044 else
3045 status = be_fw_download(adapter, fw);
3046
84517482
AK
3047fw_exit:
3048 release_firmware(fw);
3049 return status;
3050}
3051
6b7c5b94
SP
3052static struct net_device_ops be_netdev_ops = {
3053 .ndo_open = be_open,
3054 .ndo_stop = be_close,
3055 .ndo_start_xmit = be_xmit,
a54769f5 3056 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3057 .ndo_set_mac_address = be_mac_addr_set,
3058 .ndo_change_mtu = be_change_mtu,
ab1594e9 3059 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3060 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3061 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3062 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3063 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3064 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3065 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3066 .ndo_get_vf_config = be_get_vf_config,
3067#ifdef CONFIG_NET_POLL_CONTROLLER
3068 .ndo_poll_controller = be_netpoll,
3069#endif
6b7c5b94
SP
3070};
3071
/* One-time net_device initialization: advertise offload features, hook
 * up the driver's netdev/ethtool ops and register NAPI contexts for
 * every RX queue plus the combined TX/MCC queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* User-toggleable offloads: scatter-gather, TSO, checksum offload,
	 * RX checksum and VLAN tag insertion.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default; VLAN stripping and
	 * filtering are always-on (not user-toggleable).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per RX queue... */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	/* ...plus one for the TX and MCC completion queues */
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
3105
3106static void be_unmap_pci_bars(struct be_adapter *adapter)
3107{
8788fdc2
SP
3108 if (adapter->csr)
3109 iounmap(adapter->csr);
3110 if (adapter->db)
3111 iounmap(adapter->db);
6b7c5b94
SP
3112}
3113
3114static int be_map_pci_bars(struct be_adapter *adapter)
3115{
3116 u8 __iomem *addr;
db3ea781 3117 int db_reg;
6b7c5b94 3118
fe6d2a38
SP
3119 if (lancer_chip(adapter)) {
3120 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3121 pci_resource_len(adapter->pdev, 0));
3122 if (addr == NULL)
3123 return -ENOMEM;
3124 adapter->db = addr;
3125 return 0;
3126 }
3127
ba343c77
SB
3128 if (be_physfn(adapter)) {
3129 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3130 pci_resource_len(adapter->pdev, 2));
3131 if (addr == NULL)
3132 return -ENOMEM;
3133 adapter->csr = addr;
3134 }
6b7c5b94 3135
ba343c77 3136 if (adapter->generation == BE_GEN2) {
ba343c77
SB
3137 db_reg = 4;
3138 } else {
ba343c77
SB
3139 if (be_physfn(adapter))
3140 db_reg = 4;
3141 else
3142 db_reg = 0;
3143 }
3144 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3145 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
3146 if (addr == NULL)
3147 goto pci_map_err;
ba343c77
SB
3148 adapter->db = addr;
3149
6b7c5b94
SP
3150 return 0;
3151pci_map_err:
3152 be_unmap_pci_bars(adapter);
3153 return -ENOMEM;
3154}
3155
3156
3157static void be_ctrl_cleanup(struct be_adapter *adapter)
3158{
8788fdc2 3159 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3160
3161 be_unmap_pci_bars(adapter);
3162
3163 if (mem->va)
2b7bcebf
IV
3164 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3165 mem->dma);
e7b909a6 3166
5b8821b7 3167 mem = &adapter->rx_filter;
e7b909a6 3168 if (mem->va)
2b7bcebf
IV
3169 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3170 mem->dma);
6b7c5b94
SP
3171}
3172
6b7c5b94
SP
/* Set up the control path: map PCI BARs, allocate the 16-byte aligned
 * mailbox and the rx-filter DMA buffers, and initialize the locks used
 * to serialize mailbox/MCC access. On failure, unwinds whatever was
 * set up and returns a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox used by hardware can be
	 * aligned to a 16-byte boundary within the allocation.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem (the aligned view) aliases mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config space is restored during EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3225
3226static void be_stats_cleanup(struct be_adapter *adapter)
3227{
3abcdeda 3228 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3229
3230 if (cmd->va)
2b7bcebf
IV
3231 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3232 cmd->va, cmd->dma);
6b7c5b94
SP
3233}
3234
3235static int be_stats_init(struct be_adapter *adapter)
3236{
3abcdeda 3237 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3238
005d5696 3239 if (adapter->generation == BE_GEN2) {
89a88ab8 3240 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
005d5696
SX
3241 } else {
3242 if (lancer_chip(adapter))
3243 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3244 else
3245 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3246 }
2b7bcebf
IV
3247 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3248 GFP_KERNEL);
6b7c5b94
SP
3249 if (cmd->va == NULL)
3250 return -1;
d291b9af 3251 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3252 return 0;
3253}
3254
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). The netdev must be freed last because `adapter` is its
 * private area.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Stop the worker before dismantling anything it touches */
	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3282
2243e2e9 3283static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3284{
6b7c5b94
SP
3285 int status;
3286
3abcdeda
SP
3287 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3288 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3289 if (status)
3290 return status;
3291
752961a1 3292 if (adapter->function_mode & FLEX10_MODE)
82903e4b
AK
3293 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3294 else
3295 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3296
9e1453c5
AK
3297 status = be_cmd_get_cntl_attributes(adapter);
3298 if (status)
3299 return status;
3300
2243e2e9 3301 return 0;
6b7c5b94
SP
3302}
3303
fe6d2a38
SP
3304static int be_dev_family_check(struct be_adapter *adapter)
3305{
3306 struct pci_dev *pdev = adapter->pdev;
3307 u32 sli_intf = 0, if_type;
3308
3309 switch (pdev->device) {
3310 case BE_DEVICE_ID1:
3311 case OC_DEVICE_ID1:
3312 adapter->generation = BE_GEN2;
3313 break;
3314 case BE_DEVICE_ID2:
3315 case OC_DEVICE_ID2:
ecedb6ae 3316 case OC_DEVICE_ID5:
fe6d2a38
SP
3317 adapter->generation = BE_GEN3;
3318 break;
3319 case OC_DEVICE_ID3:
12f4d0a8 3320 case OC_DEVICE_ID4:
fe6d2a38
SP
3321 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3322 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3323 SLI_INTF_IF_TYPE_SHIFT;
3324
3325 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3326 if_type != 0x02) {
3327 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3328 return -EINVAL;
3329 }
fe6d2a38
SP
3330 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3331 SLI_INTF_FAMILY_SHIFT);
3332 adapter->generation = BE_GEN3;
3333 break;
3334 default:
3335 adapter->generation = 0;
3336 }
3337 return 0;
3338}
3339
37eed1cb
PR
3340static int lancer_wait_ready(struct be_adapter *adapter)
3341{
d8110f62 3342#define SLIPORT_READY_TIMEOUT 30
37eed1cb
PR
3343 u32 sliport_status;
3344 int status = 0, i;
3345
3346 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3347 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3348 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3349 break;
3350
d8110f62 3351 msleep(1000);
37eed1cb
PR
3352 }
3353
3354 if (i == SLIPORT_READY_TIMEOUT)
3355 status = -1;
3356
3357 return status;
3358}
3359
/* Wait for the Lancer port to become ready; if the port reports an
 * error that needs a reset, issue a physical reset via SLIPORT_CONTROL
 * and re-check. Returns 0 when the port is healthy, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Request a physical (IP) reset of the port */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* Error without reset-needed (or vice versa) is not
			 * recoverable here.
			 */
			status = -1;
		}
	}
	return status;
}
3387
d8110f62
PR
/* Called from the worker: if the Lancer port reports an error, attempt
 * a full recovery — reset the port, tear down and re-create all rings
 * and re-open the interface if it was running.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* Skip while EEH recovery is in progress or a UE was seen */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* Previous FW timeouts were presumably caused by the error;
		 * clear the flag so commands are issued again.
		 */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3436
/* Periodic (1 Hz) housekeeping: Lancer error recovery, UE detection,
 * statistics refresh, RX interrupt moderation and replenishing of
 * starved RX rings. Re-arms itself at the end.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* Kick off an async stats query unless one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		/* Refill rings that ran out of buffers; GFP_KERNEL is fine
		 * here since we are in process context.
		 */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3485
6b7c5b94
SP
/* PCI probe: bring up one adapter. Each failure label undoes exactly
 * the steps completed before it, in reverse order.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* The adapter struct is the netdev's private area */
	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer: wait for port readiness, then force a physical reset
	 * and re-check, so we start from a clean state.
	 */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3620
/* PCI suspend: quiesce the adapter, optionally arm wake-on-LAN, tear
 * down the rings and put the device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Stop the periodic worker before dismantling what it uses */
	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3644
/* PCI resume: re-enable the device, re-initialize firmware state and
 * the rings, and re-open the interface if it was running at suspend.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here; if it
	 * fails the subsequent be_open() presumably fails too — confirm
	 * whether this should be checked.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3680
82456b03
SP
3681/*
3682 * An FLR will stop BE from DMAing any data.
3683 */
3684static void be_shutdown(struct pci_dev *pdev)
3685{
3686 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3687
2d5d4154
AK
3688 if (!adapter)
3689 return;
82456b03 3690
0f4a6828 3691 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3692
2d5d4154 3693 netif_device_detach(adapter->netdev);
82456b03 3694
82456b03
SP
3695 if (adapter->wol)
3696 be_setup_wol(adapter, true);
3697
57841869
AK
3698 be_cmd_reset_function(adapter);
3699
82456b03 3700 pci_disable_device(pdev);
82456b03
SP
3701}
3702
cf588477
SP
/* EEH: a PCI channel error was detected. Quiesce the adapter and tell
 * the PCI core whether to attempt a slot reset or give up.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Suppresses further hardware access from the worker paths */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3729
/* EEH: the slot has been reset. Re-enable the device, restore config
 * space and verify the firmware comes back via POST.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear error flags so normal command/worker paths resume */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3755
/* EEH: recovery succeeded — rebuild rings and re-open the interface if
 * it was running before the error.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3785
/* PCI error-recovery (EEH) callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3791
6b7c5b94
SP
/* PCI driver registration table */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3802
3803static int __init be_init_module(void)
3804{
8e95a202
JP
3805 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3806 rx_frag_size != 2048) {
6b7c5b94
SP
3807 printk(KERN_WARNING DRV_NAME
3808 " : Module param rx_frag_size must be 2048/4096/8192."
3809 " Using 2048\n");
3810 rx_frag_size = 2048;
3811 }
6b7c5b94
SP
3812
3813 return pci_register_driver(&be_driver);
3814}
3815module_init(be_init_module);
3816
/* Module exit point: unregister the PCI driver. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);
This page took 0.573236 seconds and 5 git commands to generate.