be2net: pass domain id to be_cmd_link_status_query
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};
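
/*
 * Note (editorial): each string above names the hardware block that owns
 * the corresponding bit in the UE_STATUS_LOW/HIGH registers, and is used
 * by be_detect_dump_ue() below to decode those registers bit by bit.
 */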

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}
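
/*
 * The helpers below ring the RQ/TXQ/EQ/CQ doorbell registers. The wmb()
 * in the RQ/TXQ paths ensures the ring entries written earlier are
 * visible to the device before the doorbell write lets hardware consume
 * them; the EQ/CQ doorbells only re-arm and acknowledge entries.
 */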

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                        adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                port_stats->rx_input_fifo_overflow +
                rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}
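
/*
 * Illustrative numbers for the adaptive-coalescing logic above (not from
 * the hardware spec): at roughly 440,000 rx frags/sec,
 * eqd = (440000 / 110000) << 3 = 32, which is then clamped to
 * [min_eqd, max_eqd]; any value that computes to less than 10 is rounded
 * down to 0, i.e. no event-queue delay at low rates.
 */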

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}
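
/*
 * Worked example (illustrative values): 250,000,000 bytes over 2*HZ ticks
 * gives 250e6 / 2 = 125e6 bytes/sec, << 3 = 1e9 bits/sec, and dividing by
 * 1e6 yields 1000 Mbits/sec.
 */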

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                 - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
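
/*
 * Example: an skb with a linear area and two page frags needs 1 + 2 = 3
 * data WRBs plus the header WRB, i.e. 4 entries - an even count, so no
 * dummy WRB. With a single frag the count would be 3, and on BE2/BE3
 * (not Lancer) a dummy WRB is appended to keep the consumed count even.
 */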

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                        tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                        udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                NULL, 0, 1, 1);
        }

        return status;
}
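
/*
 * Example: with max_vlans = 64, adding the 65th vlan via be_vlan_add_vid()
 * pushes vlans_added past max_vlans, so the else-arm above stops
 * programming a vlan table and instead asks the firmware for
 * vlan-promiscuous mode, as the lead comment describes.
 */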

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                        adapter->vf_cfg[vf].vf_if_handle,
                        &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                        mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                (rxcp->ip_csum || rxcp->ipv6);
}
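
/*
 * In other words: a received packet's checksum is trusted only if it is
 * TCP or UDP, its L4 checksum passed, and either the IP-header checksum
 * passed or the packet is IPv6 (which carries no IP-header checksum).
 */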

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}
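
/*
 * Example with rx_frag_size = 2048 (illustrative): a 3000-byte frame
 * arrives as two receive fragments; the first BE_HDR_LEN bytes are copied
 * into the skb linear area and the remainder is attached as page
 * fragments, with frags from the same physical page coalesced into one
 * skb frag slot.
 */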

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                struct be_eth_rx_compl *compl,
                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                          compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                struct be_eth_rx_compl *compl,
                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                          compl);
        }
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vid = swab16(rxcp->vid);

                if ((adapter->pvid == rxcp->vid) &&
                    !adapter->vlan_tag[rxcp->vid])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}
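
/*
 * Example with 4K pages and rx_frag_size = 2048 (illustrative): each big
 * page is split into two receive fragments that share one DMA mapping;
 * the frag that exhausts the page is flagged last_page_user, so
 * get_rx_page_info() releases the mapping only when that final frag is
 * retired.
 */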

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}
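
/*
 * Note that the EQ doorbell is rung even when no events were found
 * (num == 0), so a spurious interrupt still re-arms the event queue;
 * NAPI is scheduled only when real events were consumed.
 */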

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;

        adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_q_clean(adapter, rxo);
                }
                be_queue_free(adapter, q);

                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);

                /* Clear any residual events */
                q = &rxo->rx_eq.q;
                if (q->created) {
                        be_eq_clean(adapter, &rxo->rx_eq);
                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
                }
                be_queue_free(adapter, q);
        }
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
        if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
            !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
                        "No support for multiple RX queues\n");
                return 1;
        }
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
                                 msix_enabled(adapter) ?
                                         adapter->num_msix_vec - 1 : 1);
        if (adapter->num_rx_qs != MAX_RX_QS)
                dev_warn(&adapter->pdev->dev,
                        "Can create only %d RX queues", adapter->num_rx_qs);

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;

                /* EQ */
                eq = &rxo->rx_eq.q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                sizeof(struct be_eq_entry));
                if (rc)
                        goto err;

                rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
                if (rc)
                        goto err;

                rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

                /* CQ */
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                sizeof(struct be_eth_rx_compl));
                if (rc)
                        goto err;

                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
                if (rc)
                        goto err;
                /* Rx Q */
                q = &rxo->q;
                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
                                sizeof(struct be_eth_rx_d));
                if (rc)
                        goto err;

                rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                        BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
                        (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
                if (rc)
                        goto err;
        }

        if (be_multi_rxq(adapter)) {
                u8 rsstable[MAX_RSS_QS];

                for_all_rss_queues(adapter, rxo, i)
                        rsstable[i] = rxo->rss_id;

                rc = be_cmd_rss_config(adapter, rsstable,
                        adapter->num_rx_qs - 1);
                if (rc)
                        goto err;
        }

        return 0;
err:
        be_rx_queues_destroy(adapter);
        return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
        if (!eqe->evt)
                return false;
        else
                return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        struct be_rx_obj *rxo;
        int isr, i, tx = 0, rx = 0;

        if (lancer_chip(adapter)) {
                if (event_peek(&adapter->tx_eq))
                        tx = event_handle(adapter, &adapter->tx_eq);
                for_all_rx_queues(adapter, rxo, i) {
                        if (event_peek(&rxo->rx_eq))
                                rx |= event_handle(adapter, &rxo->rx_eq);
                }

                if (!(tx || rx))
                        return IRQ_NONE;

        } else {
                isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                        (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
                if (!isr)
                        return IRQ_NONE;

                if ((1 << adapter->tx_eq.eq_idx & isr))
                        event_handle(adapter, &adapter->tx_eq);

                for_all_rx_queues(adapter, rxo, i) {
                        if ((1 << rxo->rx_eq.eq_idx & isr))
                                event_handle(adapter, &rxo->rx_eq);
                }
        }

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_rx_obj *rxo = dev;
        struct be_adapter *adapter = rxo->adapter;

        event_handle(adapter, &rxo->rx_eq);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq);

        return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
        return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u32 work_done;

        rxo->stats.rx_polls++;
        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                /* Ignore flush completions */
                if (rxcp->num_rcvd) {
                        if (do_gro(rxcp))
                                be_rx_compl_process_gro(adapter, rxo, rxcp);
                        else
                                be_rx_compl_process(adapter, rxo, rxcp);
                }
                be_rx_stats_update(rxo, rxcp);
        }

        /* Refill the queue */
        if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                be_post_rx_frags(rxo, GFP_ATOMIC);

        /* All consumed */
        if (work_done < budget) {
                napi_complete(napi);
                be_cq_notify(adapter, rx_cq->id, true, work_done);
        } else {
                /* More to be consumed; continue with interrupts disabled */
                be_cq_notify(adapter, rx_cq->id, false, work_done);
        }
        return work_done;
}
1780

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
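
/* Dump any unrecoverable error (UE) reported by the chip. The UE status
 * words are fetched over PCI config space, masked with their per-bit
 * mask registers, and each set bit is printed using the block-name
 * tables (ue_status_low_desc/ue_status_hi_desc) defined at the top of
 * this file.
 */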
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
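
/* Periodic housekeeping, rescheduled every second: UE detection, stats
 * refresh, TX/RX rate accounting, adaptive EQ delay updates, and
 * replenishing any RX queue that starved for buffers.
 */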
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}
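
/* MSI-X vector negotiation below relies on the classic pci_enable_msix()
 * contract: a positive return value is the number of vectors that could
 * be allocated. Illustrative flow (numbers are examples only): ask for
 * num_vec = 5 (4 RX EQs + 1 TX/MCC EQ), get back 3, retry with 3 and
 * run with the reduced set, as long as it covers BE_MIN_MSIX_VECTORS.
 */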
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
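
/* SR-IOV bring-up: the num_vfs module parameter is clamped to the
 * TotalVFs value advertised in the PCIe SR-IOV capability before
 * pci_enable_sriov() is called, so asking for more VFs than the device
 * supports just degrades to the supported maximum with a log message.
 */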
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}
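
/* IRQ descriptions are built as "<netdev>-<desc>", so with a device
 * named eth0 the vectors show up in /proc/interrupts as eth0-tx,
 * eth0-rxq0, eth0-rxq1, and so on (the rxq names come from
 * be_msix_register() below).
 */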
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
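
/* Shutdown-ordering note for be_close(): async MCC processing is turned
 * off first, NAPI contexts are disabled before interrupts are
 * unregistered, and every vector is synchronize_irq()'d so no handler
 * can still be running when the TX completions are finally drained.
 */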
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
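
/* Wake-on-LAN setup: enabling programs the current netdev MAC as the
 * magic-packet pattern and arms wake from D3hot/D3cold; disabling sends
 * an all-zero MAC instead (apparently the firmware convention for
 * clearing the magic-WoL filter).
 */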
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
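/* Illustration (not from the driver): if the jhash-derived seed were
 * 02:00:11:22:33:40, then VF0 would be assigned ...:40, VF1 ...:41 and
 * so on, since only the last octet (mac[5]) is bumped per VF below.
 */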
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* Update redboot only if the CRC does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}
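
/* Flash programming note: images are streamed to the card in 32KB
 * pieces. Every piece but the last is sent with FLASHROM_OPER_SAVE;
 * the final piece uses FLASHROM_OPER_FLASH, which commits the image.
 * Illustrative example: a 100KB component goes down as three 32KB
 * SAVE ops followed by one 4KB FLASH op.
 */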
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}
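
/* BAR usage, as implied by the mapping logic below (summary only, not
 * taken from a datasheet): Lancer exposes everything through BAR 0;
 * GEN2 parts use BAR 2 for CSR, BAR 1 for pcicfg and BAR 4 for
 * doorbells; GEN3 parts use BAR 0 for pcicfg and BAR 4 (PF) or BAR 0
 * (VF) for doorbells. VFs borrow the pcicfg view from the doorbell
 * mapping at SRIOV_VF_PCICFG_OFFSET.
 */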
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
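
/* Mailbox note: the mailbox is kept 16-byte aligned (presumably a device
 * requirement), so be_ctrl_init() below over-allocates by 16 bytes and
 * rounds both the CPU address and the DMA address up with PTR_ALIGN.
 * Illustration only: a buffer landing at dma 0x...1008 yields an aligned
 * mailbox at 0x...1010.
 */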
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
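
/* lancer_wait_ready() polls SLIPORT_STATUS once every 20ms for up to
 * SLIPORT_READY_TIMEOUT (500) iterations, i.e. roughly 10 seconds,
 * before giving up on the port becoming ready.
 */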
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);