be2net: Support for version 1 of stats for BE3
[deliverable/linux.git] / drivers/net/benet/be_main.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

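/* Allocate a DMA-coherent ring of 'len' entries, 'entry_size' bytes
 * each; the backing memory is zeroed so no stale valid bits are seen.
 */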
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

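/* The notify routines below ring doorbell registers in the adapter's
 * BAR; the wmb() ensures ring-entry writes reach memory before the
 * doorbell write that hands them to the device.
 */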
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

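/* EQ/CQ doorbells also report how many entries were consumed
 * (num_popped) and whether to re-arm the queue; the EQ variant can
 * additionally clear the interrupt.
 */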
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

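/* The stats format depends on the ASIC generation: BE2 reports v0
 * stats and BE3 reports v1 stats (the subject of this change). Both
 * are copied into the common be_drv_stats so the rest of the driver
 * stays version-agnostic.
 */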
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events =
			rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events =
			rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr =
		rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags =
		rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr =
		rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags =
		rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3)
		populate_be3_stats(adapter);
	else
		populate_be2_stats(adapter);
}

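/* Fold the per-queue driver counters and the firmware stats parsed
 * above into the standard net_device_stats fields.
 */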
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			struct be_erx_stats_v1 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		} else {
			struct be_erx_stats_v0 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

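/* Worked example (assumed numbers) for the EQ-delay heuristic below:
 * at 440,000 rx frags/sec, eqd = (440000 / 110000) << 3 = 32, which is
 * then clamped to [min_eqd, max_eqd] before being programmed.
 */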
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

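/* e.g. (assumed sample) 25,000,000 bytes over 2 seconds of ticks:
 * 12,500,000 bytes/sec << 3 = 100,000,000 bits/sec, i.e. 100 Mbits/sec
 * after the final divide.
 */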
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

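/* Example: an skb with linear data plus 3 page frags needs
 * 1 + 3 + 1 (hdr wrb) = 5 WRBs; on BE2/BE3 an odd count gets a dummy
 * WRB appended to make it even, while Lancer needs no padding.
 */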
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

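/* Transmit entry point: map the skb into WRBs, stop the queue early if
 * another maximally-fragmented skb might not fit, then ring the TX
 * doorbell.
 */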
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

2e588f84 970 struct be_rx_compl_info *rxcp)
4097f663 971{
3abcdeda 972 struct be_rx_stats *stats = &rxo->stats;
1ef78abe 973
3abcdeda 974 stats->rx_compl++;
2e588f84
SP
975 stats->rx_frags += rxcp->num_rcvd;
976 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 977 stats->rx_pkts++;
2e588f84 978 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 979 stats->rx_mcast_pkts++;
2e588f84
SP
980 if (rxcp->err)
981 stats->rxcp_err++;
4097f663
SP
982}
983
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
				rxcp->vlan_tag);
}

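/* The v0/v1 parsers below flatten the generation-specific completion
 * descriptor into the common be_rx_compl_info so the rx path needs no
 * version checks.
 */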
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

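/* With the default 2048-byte rx_frag_size and 4K pages (assumed),
 * big_page_size is one page and each page yields two rx fragments;
 * last_page_user marks the fragment whose reclaim unmaps the page.
 */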
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

/* RSS needs the function to own multiple RX rings; the 0x400 bit in
 * function_mode (likely the multi-channel/FLEX10 mode flag) disables it.
 */
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx, num_wrbs = 0;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		atomic_sub(num_wrbs, &txq->used);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

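/* Read the PCI-config UE (unrecoverable error) status/mask registers and
 * report any unmasked error bits by name (see ue_status_low_desc and
 * ue_status_hi_desc above). Once a UE is seen, the adapter is flagged
 * and the periodic worker stops re-checking it.
 */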
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

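/* Periodic (1s) housekeeping: poll for UEs, fire the async stats command,
 * update tx/rx rate estimates and adaptive EQ delays, and replenish any
 * RX rings that ran dry. Before the interface is up, it only reaps
 * pending MCC completions.
 */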
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

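/* Ask for one vector per desired RX ring plus one shared TX/MCC vector.
 * pci_enable_msix() returning a positive value means "only this many
 * vectors available", so retry once with that count before giving up
 * (in which case the driver later falls back to INTx).
 */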
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

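/* Enable SR-IOV on the PF. The requested num_vfs is clamped to the
 * TotalVFs value advertised in the device's SR-IOV capability before
 * being handed to pci_enable_sriov().
 */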
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

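/* Program (or clear) the magic-packet WoL filter in firmware and
 * arm/disarm PCI PM wake for D3hot/D3cold accordingly. On disable, an
 * all-zero MAC is passed, which apparently drops the filter.
 */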
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

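/* Flashing helpers follow. Redboot (the boot code region) is only
 * rewritten when its on-flash CRC differs from the CRC of the candidate
 * image, which avoids a needless (and comparatively risky) rewrite of
 * the boot region.
 */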
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

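/* Walk the per-generation component table and flash each image present
 * in the UFI file, feeding it to firmware in 32KB chunks; intermediate
 * chunks use FLASHROM_OPER_SAVE and only the final chunk uses
 * FLASHROM_OPER_FLASH to commit the component.
 */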
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

2655
3f0d4560
AK
2656static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2657{
2658 if (fhdr == NULL)
2659 return 0;
2660 if (fhdr->build[0] == '3')
2661 return BE_GEN3;
2662 else if (fhdr->build[0] == '2')
2663 return BE_GEN2;
2664 else
2665 return 0;
2666}
2667
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}


static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2)
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

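/* Lancer exposes readiness through the SLIPORT status register: poll it
 * for up to 500 * 20ms (~10s). If both the error and reset-needed bits
 * are set, write SLIPORT_CONTROL to ask the adapter to recover, then
 * wait for ready again before declaring failure.
 */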
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

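/* EEH (PCI error) recovery: on error, tear the rings down and let the
 * core reset the slot; slot_reset re-enables the device and re-POSTs
 * the card; resume re-inits firmware, rebuilds the rings and reopens
 * the interface.
 */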
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

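/* rx_frag_size may only be 2048, 4096 or 8192; anything else is coerced
 * back to the 2048 default at module load. Illustrative usage (assuming
 * the module is installed as be2net; the num_vfs value is hypothetical
 * and needs SR-IOV capable hardware):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */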
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
		rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);