be2net: add rxhash support
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",  /* a missing comma here silently concatenated "NETC" with the next entry */
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

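/* Doorbell helpers: each write below tells the adapter how many entries
 * were just posted to a ring. The wmb() ensures the ring-memory updates
 * are globally visible before the doorbell write that hands them over to
 * the device.
 */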
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                        adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;
        struct be_rx_obj *rxo;
        int i;

        memset(dev_stats, 0, sizeof(*dev_stats));
        for_all_rx_queues(adapter, rxo, i) {
                dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
                dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
                dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                /* no space in linux buffers: best possible approximation */
                dev_stats->rx_dropped +=
                        erx_stats->rx_drops_no_fragments[rxo->q.id];
        }

        dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
        dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                port_stats->rx_input_fifo_overflow +
                rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
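/* This is adaptive interrupt coalescing: the measured RX fragment rate
 * is mapped to an event-queue delay (eqd), clamped to the EQ's
 * [min_eqd, max_eqd] range, and pushed to the device only when it
 * changes. Very low rates disable the delay entirely to keep latency low.
 */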
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->prev_rx_frags = stats->rx_frags;
        eqd = stats->rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

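/* Convert a byte count over a jiffies interval into Mbits/sec:
 * bytes / (ticks/HZ) gives bytes/sec, << 3 converts to bits/sec,
 * and the final division scales to megabits.
 */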
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* bits/sec -> Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                 - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

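/* DMA-map the skb head and frags and fill one WRB per mapped piece (plus
 * the header WRB and an optional dummy). On a mapping failure the queue
 * head is rewound and all mappings done so far are undone.
 */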
static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                        adapter->vf_cfg[vf].vf_if_handle,
                        &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                        mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
        struct be_rx_stats *stats = &rxo->stats;
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->rx_jiffies) < 2 * HZ)
                return;

        stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
                                now - stats->rx_jiffies);
        stats->rx_jiffies = now;
        stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = &rxo->stats;

        stats->rx_compl++;
        stats->rx_frags += rxcp->num_rcvd;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely(adapter->rx_csum && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
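        /* The rxhash support added by this patch: propagate the RSS hash
         * computed by the NIC into skb->rxhash so the stack can use it for
         * receive packet steering instead of recomputing a flow hash in
         * software.
         */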
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

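/* BE3 "native" mode uses the v1 RX completion layout; older mode uses v0.
 * Both parsers below normalize a hardware completion into the
 * chip-independent be_rx_compl_info, including the new rss_hash field
 * that feeds skb->rxhash.
 */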
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                          compl);
        }
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                          compl);
        }
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vid = swab16(rxcp->vid);

                if ((adapter->pvid == rxcp->vid) &&
                    !adapter->vlan_tag[rxcp->vid])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
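/* One compound page backs several RX fragments: each fragment takes an
 * extra page reference via get_page(), and only the fragment marked
 * last_page_user triggers the dma_unmap_page() when it is consumed in
 * get_rx_page_info().
 */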
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rxo->stats.rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

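/* The TX completion ring (and the event rings below) use a 'valid' word
 * written by hardware; the rmb() in each consumer orders reading the
 * entry body after that word is seen, and each entry is zeroed once
 * consumed so the check works again after the ring wraps.
 */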
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        be_tx_compl_process(adapter, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        cmpl = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                be_tx_compl_process(adapter, end_idx);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->tx_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
        be_queue_free(adapter, q);

        q = &adapter->tx_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);

        /* Clear any residual events */
        be_eq_clean(adapter, &adapter->tx_eq);

        q = &adapter->tx_eq.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_EQ);
        be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;

        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
        adapter->tx_eq.cur_eqd = 96;
        adapter->tx_eq.enable_aic = false;
        /* Alloc Tx Event queue */
        eq = &adapter->tx_eq.q;
        if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
                return -1;

        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;

        adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
                        sizeof(struct be_eth_tx_compl)))
                goto tx_eq_destroy;

        /* Ask BE to create Tx eth compl queue */
        if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
                goto tx_cq_free;

        /* Alloc TX eth queue */
        q = &adapter->tx_obj.q;
        if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
                goto tx_cq_destroy;

        /* Ask BE to create Tx eth queue */
        if (be_cmd_txq_create(adapter, q, cq))
                goto tx_q_free;
        return 0;

tx_q_free:
        be_queue_free(adapter, q);
tx_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
        be_queue_free(adapter, cq);
tx_eq_destroy:
        be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
        be_queue_free(adapter, eq);
        return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_q_clean(adapter, rxo);
                }
                be_queue_free(adapter, q);

                q = &rxo->cq;
                if (q->created)
                        be_cmd_q_destroy(adapter, q, QTYPE_CQ);
                be_queue_free(adapter, q);

                /* Clear any residual events */
                q = &rxo->rx_eq.q;
                if (q->created) {
                        be_eq_clean(adapter, &rxo->rx_eq);
                        be_cmd_q_destroy(adapter, q, QTYPE_EQ);
                }
                be_queue_free(adapter, q);
        }
}

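/* RSS is usable only when the function supports it, SR-IOV is off and the
 * function is not in a special mode flagged by function_mode bit 0x400
 * (presumably a multi-channel mode bit; the magic number is unnamed in
 * this version of the driver).
 */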
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
        if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
                !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
                        "No support for multiple RX queues\n");
                return 1;
        }
}

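/* Each RX queue gets its own EQ and CQ. Queue 0 is the default non-RSS
 * queue; queues 1..N are created with RSS enabled and their rss_ids are
 * then programmed into the RSS indirection table via be_cmd_rss_config().
 */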
static int be_rx_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *eq, *q, *cq;
        struct be_rx_obj *rxo;
        int rc, i;

        adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
                                msix_enabled(adapter) ?
                                        adapter->num_msix_vec - 1 : 1);
        if (adapter->num_rx_qs != MAX_RX_QS)
                dev_warn(&adapter->pdev->dev,
                        "Can create only %d RX queues", adapter->num_rx_qs);

        adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
        for_all_rx_queues(adapter, rxo, i) {
                rxo->adapter = adapter;
                rxo->rx_eq.max_eqd = BE_MAX_EQD;
                rxo->rx_eq.enable_aic = true;

                /* EQ */
                eq = &rxo->rx_eq.q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
                                sizeof(struct be_eq_entry));
                if (rc)
                        goto err;

                rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
                if (rc)
                        goto err;

                rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

                /* CQ */
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
                                sizeof(struct be_eth_rx_compl));
                if (rc)
                        goto err;

                rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
                if (rc)
                        goto err;

                /* Rx Q */
                q = &rxo->q;
                rc = be_queue_alloc(adapter, q, RX_Q_LEN,
                                sizeof(struct be_eth_rx_d));
                if (rc)
                        goto err;

                rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
                        BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
                        (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
                if (rc)
                        goto err;
        }

        if (be_multi_rxq(adapter)) {
                u8 rsstable[MAX_RSS_QS];

                for_all_rss_queues(adapter, rxo, i)
                        rsstable[i] = rxo->rss_id;

                rc = be_cmd_rss_config(adapter, rsstable,
                        adapter->num_rx_qs - 1);
                if (rc)
                        goto err;
        }

        return 0;
err:
        be_rx_queues_destroy(adapter);
        return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
        if (!eqe->evt)
                return false;
        else
                return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
        struct be_adapter *adapter = dev;
        struct be_rx_obj *rxo;
        int isr, i, tx = 0, rx = 0;

        if (lancer_chip(adapter)) {
                if (event_peek(&adapter->tx_eq))
                        tx = event_handle(adapter, &adapter->tx_eq);
                for_all_rx_queues(adapter, rxo, i) {
                        if (event_peek(&rxo->rx_eq))
                                rx |= event_handle(adapter, &rxo->rx_eq);
                }

                if (!(tx || rx))
                        return IRQ_NONE;

        } else {
                isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
                        (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
                if (!isr)
                        return IRQ_NONE;

                if ((1 << adapter->tx_eq.eq_idx & isr))
                        event_handle(adapter, &adapter->tx_eq);

                for_all_rx_queues(adapter, rxo, i) {
                        if ((1 << rxo->rx_eq.eq_idx & isr))
                                event_handle(adapter, &rxo->rx_eq);
                }
        }

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
        struct be_rx_obj *rxo = dev;
        struct be_adapter *adapter = rxo->adapter;

        event_handle(adapter, &rxo->rx_eq);

        return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

        event_handle(adapter, &adapter->tx_eq);

        return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
        return (rxcp->tcpf && !rxcp->err) ? true : false;
}

1739
49b05221 1740static int be_poll_rx(struct napi_struct *napi, int budget)
6b7c5b94
SP
1741{
1742 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
3abcdeda
SP
1743 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1744 struct be_adapter *adapter = rxo->adapter;
1745 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1746 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1747 u32 work_done;
1748
3abcdeda 1749 rxo->stats.rx_polls++;
6b7c5b94 1750 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1751 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1752 if (!rxcp)
1753 break;
1754
e80d9da6 1755 /* Ignore flush completions */
2e588f84
SP
1756 if (rxcp->num_rcvd) {
1757 if (do_gro(rxcp))
64642811
SP
1758 be_rx_compl_process_gro(adapter, rxo, rxcp);
1759 else
1760 be_rx_compl_process(adapter, rxo, rxcp);
1761 }
2e588f84 1762 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
1763 }
1764
6b7c5b94 1765 /* Refill the queue */
3abcdeda 1766 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1829b086 1767 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94
SP
1768
1769 /* All consumed */
1770 if (work_done < budget) {
1771 napi_complete(napi);
8788fdc2 1772 be_cq_notify(adapter, rx_cq->id, true, work_done);
6b7c5b94
SP
1773 } else {
1774 /* More to be consumed; continue with interrupts disabled */
8788fdc2 1775 be_cq_notify(adapter, rx_cq->id, false, work_done);
6b7c5b94
SP
1776 }
1777 return work_done;
1778}
1779
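/*
 * Editor's note: be_poll_rx() above follows the standard NAPI contract.
 * Minimal sketch of that contract ('work' stands for the number of
 * completions actually processed; the body is elided):
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* ... process at most 'budget' RX completions, counting in work ... */

	if (work < budget) {
		/* all done: exit polling and re-enable the CQ interrupt */
		napi_complete(napi);
		/* be_poll_rx() re-arms via be_cq_notify(..., true, work) */
	}
	/* else: leave interrupts off; the core will poll us again */
	return work;
}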
f31e50a8
SP
 1780/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 1781 * For TX/MCC we don't honour budget; consume everything.
1782 */
1783static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1784{
f31e50a8
SP
1785 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1786 struct be_adapter *adapter =
1787 container_of(tx_eq, struct be_adapter, tx_eq);
5fb379ee
SP
1788 struct be_queue_info *txq = &adapter->tx_obj.q;
1789 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
6b7c5b94 1790 struct be_eth_tx_compl *txcp;
f31e50a8 1791 int tx_compl = 0, mcc_compl, status = 0;
6b7c5b94
SP
1792 u16 end_idx;
1793
5fb379ee 1794 while ((txcp = be_tx_compl_get(tx_cq))) {
6b7c5b94 1795 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
f31e50a8 1796 wrb_index, txcp);
6b7c5b94 1797 be_tx_compl_process(adapter, end_idx);
f31e50a8 1798 tx_compl++;
6b7c5b94
SP
1799 }
1800
f31e50a8
SP
1801 mcc_compl = be_process_mcc(adapter, &status);
1802
1803 napi_complete(napi);
1804
1805 if (mcc_compl) {
1806 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1807 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1808 }
1809
1810 if (tx_compl) {
1811 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
5fb379ee
SP
1812
1813 /* As Tx wrbs have been freed up, wake up netdev queue if
1814 * it was stopped due to lack of tx wrbs.
1815 */
1816 if (netif_queue_stopped(adapter->netdev) &&
6b7c5b94 1817 atomic_read(&txq->used) < txq->len / 2) {
5fb379ee
SP
1818 netif_wake_queue(adapter->netdev);
1819 }
1820
3abcdeda
SP
1821 tx_stats(adapter)->be_tx_events++;
1822 tx_stats(adapter)->be_tx_compl += tx_compl;
6b7c5b94 1823 }
6b7c5b94
SP
1824
1825 return 1;
1826}
1827
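/*
 * Editor's note: the wake-up check above adds hysteresis -- the TX queue is
 * stopped when WRBs run out but only woken once at least half the ring is
 * free again, which avoids rapid stop/wake flapping. Same test, restated:
 */
static inline bool example_txq_can_wake(struct be_queue_info *txq)
{
	return atomic_read(&txq->used) < txq->len / 2;
}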
d053de91 1828void be_detect_dump_ue(struct be_adapter *adapter)
7c185276
AK
1829{
1830 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1831 u32 i;
1832
1833 pci_read_config_dword(adapter->pdev,
1834 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1835 pci_read_config_dword(adapter->pdev,
1836 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1837 pci_read_config_dword(adapter->pdev,
1838 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1839 pci_read_config_dword(adapter->pdev,
1840 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1841
1842 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1843 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1844
d053de91
AK
1845 if (ue_status_lo || ue_status_hi) {
1846 adapter->ue_detected = true;
7acc2087 1847 adapter->eeh_err = true;
d053de91
AK
1848 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1849 }
1850
7c185276
AK
1851 if (ue_status_lo) {
1852 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1853 if (ue_status_lo & 1)
1854 dev_err(&adapter->pdev->dev,
1855 "UE: %s bit set\n", ue_status_low_desc[i]);
1856 }
1857 }
1858 if (ue_status_hi) {
1859 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1860 if (ue_status_hi & 1)
1861 dev_err(&adapter->pdev->dev,
1862 "UE: %s bit set\n", ue_status_hi_desc[i]);
1863 }
1864 }
1865
1866}
1867
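/*
 * Editor's note: the UE (unrecoverable error) decode above first clears any
 * bits flagged in the corresponding *_MASK registers -- presumably error
 * sources the firmware wants ignored (assumption) -- then walks each status
 * word bit by bit. Equivalent bit-walk helper, for illustration:
 */
static void example_dump_ue_bits(struct be_adapter *adapter, u32 status,
				 char **desc)
{
	u32 i;

	for (i = 0; status; status >>= 1, i++)
		if (status & 1)
			dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", desc[i]);
}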
ea1dae11
SP
1868static void be_worker(struct work_struct *work)
1869{
1870 struct be_adapter *adapter =
1871 container_of(work, struct be_adapter, work.work);
3abcdeda
SP
1872 struct be_rx_obj *rxo;
1873 int i;
ea1dae11 1874
16da8250
SP
1875 if (!adapter->ue_detected && !lancer_chip(adapter))
1876 be_detect_dump_ue(adapter);
1877
f203af70
SK
1878 /* when interrupts are not yet enabled, just reap any pending
1879 * mcc completions */
1880 if (!netif_running(adapter->netdev)) {
1881 int mcc_compl, status = 0;
1882
1883 mcc_compl = be_process_mcc(adapter, &status);
1884
1885 if (mcc_compl) {
1886 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1887 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1888 }
9b037f38 1889
f203af70
SK
1890 goto reschedule;
1891 }
1892
b2aebe6d 1893 if (!adapter->stats_cmd_sent)
3abcdeda 1894 be_cmd_get_stats(adapter, &adapter->stats_cmd);
ea1dae11 1895
4097f663 1896 be_tx_rate_update(adapter);
4097f663 1897
3abcdeda
SP
1898 for_all_rx_queues(adapter, rxo, i) {
1899 be_rx_rate_update(rxo);
1900 be_rx_eqd_update(adapter, rxo);
1901
1902 if (rxo->rx_post_starved) {
1903 rxo->rx_post_starved = false;
1829b086 1904 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda 1905 }
ea1dae11
SP
1906 }
1907
f203af70 1908reschedule:
ea1dae11
SP
1909 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1910}
1911
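/*
 * Editor's note: be_worker() is a self-rescheduling housekeeping task --
 * every pass ends by queueing itself again 1000 ms out, so at most one
 * instance is pending at a time (torn down via cancel_delayed_work_sync()).
 * Skeleton of the idiom:
 */
static void example_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	/* ... periodic housekeeping: stats, EQ tuning, ring refills ... */

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}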
8d56ff11
SP
1912static void be_msix_disable(struct be_adapter *adapter)
1913{
ac6a0c4a 1914 if (msix_enabled(adapter)) {
8d56ff11 1915 pci_disable_msix(adapter->pdev);
ac6a0c4a 1916 adapter->num_msix_vec = 0;
3abcdeda
SP
1917 }
1918}
1919
6b7c5b94
SP
1920static void be_msix_enable(struct be_adapter *adapter)
1921{
3abcdeda 1922#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
ac6a0c4a 1923 int i, status, num_vec;
6b7c5b94 1924
ac6a0c4a 1925 num_vec = be_num_rxqs_want(adapter) + 1;
3abcdeda 1926
ac6a0c4a 1927 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
1928 adapter->msix_entries[i].entry = i;
1929
ac6a0c4a 1930 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
1931 if (status == 0) {
1932 goto done;
1933 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 1934 num_vec = status;
3abcdeda 1935 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 1936 num_vec) == 0)
3abcdeda 1937 goto done;
3abcdeda
SP
1938 }
1939 return;
1940done:
ac6a0c4a
SP
1941 adapter->num_msix_vec = num_vec;
1942 return;
6b7c5b94
SP
1943}
1944
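/*
 * Editor's note: with the pci_enable_msix() API of this era, a positive
 * return value means "request denied, but this many vectors are available",
 * which is why be_msix_enable() retries with num_vec = status. The same
 * negotiation pattern in isolation (illustrative helper):
 */
static int example_msix_negotiate(struct pci_dev *pdev,
				  struct msix_entry *entries,
				  int want, int min)
{
	int rc = pci_enable_msix(pdev, entries, want);

	if (rc > 0 && rc >= min)	/* retry with what is on offer */
		rc = pci_enable_msix(pdev, entries, rc);
	return rc;			/* 0 on success */
}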
ba343c77
SB
1945static void be_sriov_enable(struct be_adapter *adapter)
1946{
344dbf10 1947 be_check_sriov_fn_type(adapter);
6dedec81 1948#ifdef CONFIG_PCI_IOV
ba343c77 1949 if (be_physfn(adapter) && num_vfs) {
6dedec81
AK
1950 int status;
1951
ba343c77
SB
1952 status = pci_enable_sriov(adapter->pdev, num_vfs);
1953 adapter->sriov_enabled = status ? false : true;
1954 }
1955#endif
ba343c77
SB
1956}
1957
1958static void be_sriov_disable(struct be_adapter *adapter)
1959{
1960#ifdef CONFIG_PCI_IOV
1961 if (adapter->sriov_enabled) {
1962 pci_disable_sriov(adapter->pdev);
1963 adapter->sriov_enabled = false;
1964 }
1965#endif
1966}
1967
fe6d2a38
SP
1968static inline int be_msix_vec_get(struct be_adapter *adapter,
1969 struct be_eq_obj *eq_obj)
6b7c5b94 1970{
ecd62107 1971 return adapter->msix_entries[eq_obj->eq_idx].vector;
6b7c5b94
SP
1972}
1973
b628bde2
SP
1974static int be_request_irq(struct be_adapter *adapter,
1975 struct be_eq_obj *eq_obj,
3abcdeda 1976 void *handler, char *desc, void *context)
6b7c5b94
SP
1977{
1978 struct net_device *netdev = adapter->netdev;
b628bde2
SP
1979 int vec;
1980
1981 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
fe6d2a38 1982 vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 1983 return request_irq(vec, handler, 0, eq_obj->desc, context);
b628bde2
SP
1984}
1985
3abcdeda
SP
1986static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1987 void *context)
b628bde2 1988{
fe6d2a38 1989 int vec = be_msix_vec_get(adapter, eq_obj);
3abcdeda 1990 free_irq(vec, context);
b628bde2 1991}
6b7c5b94 1992
b628bde2
SP
1993static int be_msix_register(struct be_adapter *adapter)
1994{
3abcdeda
SP
1995 struct be_rx_obj *rxo;
1996 int status, i;
1997 char qname[10];
b628bde2 1998
3abcdeda
SP
1999 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2000 adapter);
6b7c5b94
SP
2001 if (status)
2002 goto err;
2003
3abcdeda
SP
2004 for_all_rx_queues(adapter, rxo, i) {
2005 sprintf(qname, "rxq%d", i);
2006 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2007 qname, rxo);
2008 if (status)
2009 goto err_msix;
2010 }
b628bde2 2011
6b7c5b94 2012 return 0;
b628bde2 2013
3abcdeda
SP
2014err_msix:
2015 be_free_irq(adapter, &adapter->tx_eq, adapter);
2016
2017 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2018 be_free_irq(adapter, &rxo->rx_eq, rxo);
2019
6b7c5b94
SP
2020err:
2021 dev_warn(&adapter->pdev->dev,
2022 "MSIX Request IRQ failed - err %d\n", status);
ac6a0c4a 2023 be_msix_disable(adapter);
6b7c5b94
SP
2024 return status;
2025}
2026
2027static int be_irq_register(struct be_adapter *adapter)
2028{
2029 struct net_device *netdev = adapter->netdev;
2030 int status;
2031
ac6a0c4a 2032 if (msix_enabled(adapter)) {
6b7c5b94
SP
2033 status = be_msix_register(adapter);
2034 if (status == 0)
2035 goto done;
ba343c77
SB
2036 /* INTx is not supported for VF */
2037 if (!be_physfn(adapter))
2038 return status;
6b7c5b94
SP
2039 }
2040
2041 /* INTx */
2042 netdev->irq = adapter->pdev->irq;
2043 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2044 adapter);
2045 if (status) {
2046 dev_err(&adapter->pdev->dev,
2047 "INTx request IRQ failed - err %d\n", status);
2048 return status;
2049 }
2050done:
2051 adapter->isr_registered = true;
2052 return 0;
2053}
2054
2055static void be_irq_unregister(struct be_adapter *adapter)
2056{
2057 struct net_device *netdev = adapter->netdev;
3abcdeda
SP
2058 struct be_rx_obj *rxo;
2059 int i;
6b7c5b94
SP
2060
2061 if (!adapter->isr_registered)
2062 return;
2063
2064 /* INTx */
ac6a0c4a 2065 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2066 free_irq(netdev->irq, adapter);
2067 goto done;
2068 }
2069
2070 /* MSIx */
3abcdeda
SP
2071 be_free_irq(adapter, &adapter->tx_eq, adapter);
2072
2073 for_all_rx_queues(adapter, rxo, i)
2074 be_free_irq(adapter, &rxo->rx_eq, rxo);
2075
6b7c5b94
SP
2076done:
2077 adapter->isr_registered = false;
6b7c5b94
SP
2078}
2079
889cd4b2
SP
2080static int be_close(struct net_device *netdev)
2081{
2082 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda 2083 struct be_rx_obj *rxo;
889cd4b2 2084 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2085 int vec, i;
889cd4b2 2086
889cd4b2
SP
2087 be_async_mcc_disable(adapter);
2088
889cd4b2
SP
2089 netif_carrier_off(netdev);
2090 adapter->link_up = false;
2091
fe6d2a38
SP
2092 if (!lancer_chip(adapter))
2093 be_intr_set(adapter, false);
889cd4b2 2094
63fcb27f
PR
2095 for_all_rx_queues(adapter, rxo, i)
2096 napi_disable(&rxo->rx_eq.napi);
2097
2098 napi_disable(&tx_eq->napi);
2099
2100 if (lancer_chip(adapter)) {
2101 be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
2102 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2103 for_all_rx_queues(adapter, rxo, i)
2104 be_cq_notify(adapter, rxo->cq.id, false, 0);
2105 }
2106
ac6a0c4a 2107 if (msix_enabled(adapter)) {
fe6d2a38 2108 vec = be_msix_vec_get(adapter, tx_eq);
889cd4b2 2109 synchronize_irq(vec);
3abcdeda
SP
2110
2111 for_all_rx_queues(adapter, rxo, i) {
fe6d2a38 2112 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
3abcdeda
SP
2113 synchronize_irq(vec);
2114 }
889cd4b2
SP
2115 } else {
2116 synchronize_irq(netdev->irq);
2117 }
2118 be_irq_unregister(adapter);
2119
889cd4b2
SP
2120 /* Wait for all pending tx completions to arrive so that
2121 * all tx skbs are freed.
2122 */
2123 be_tx_compl_clean(adapter);
2124
2125 return 0;
2126}
2127
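/*
 * Editor's note on the teardown order in be_close() above: async MCC is
 * stopped first, then interrupts are masked, NAPI is disabled, in-flight
 * IRQs are synchronized, handlers are unregistered, and only then are TX
 * completions drained -- so no new events can arrive while skbs are being
 * reclaimed.
 */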
6b7c5b94
SP
2128static int be_open(struct net_device *netdev)
2129{
2130 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 2131 struct be_eq_obj *tx_eq = &adapter->tx_eq;
3abcdeda 2132 struct be_rx_obj *rxo;
a8f447bd 2133 bool link_up;
3abcdeda 2134 int status, i;
0388f251
SB
2135 u8 mac_speed;
2136 u16 link_speed;
5fb379ee 2137
3abcdeda 2138 for_all_rx_queues(adapter, rxo, i) {
1829b086 2139 be_post_rx_frags(rxo, GFP_KERNEL);
3abcdeda
SP
2140 napi_enable(&rxo->rx_eq.napi);
2141 }
5fb379ee
SP
2142 napi_enable(&tx_eq->napi);
2143
2144 be_irq_register(adapter);
2145
fe6d2a38
SP
2146 if (!lancer_chip(adapter))
2147 be_intr_set(adapter, true);
5fb379ee
SP
2148
2149 /* The evt queues are created in unarmed state; arm them */
3abcdeda
SP
2150 for_all_rx_queues(adapter, rxo, i) {
2151 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2152 be_cq_notify(adapter, rxo->cq.id, true, 0);
2153 }
8788fdc2 2154 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
5fb379ee 2155
7a1e9b20
SP
2156 /* Now that interrupts are on we can process async mcc */
2157 be_async_mcc_enable(adapter);
2158
0388f251
SB
2159 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2160 &link_speed);
a8f447bd 2161 if (status)
889cd4b2 2162 goto err;
a8f447bd 2163 be_link_status_update(adapter, link_up);
5fb379ee 2164
889cd4b2 2165 if (be_physfn(adapter)) {
1da87b7f 2166 status = be_vid_config(adapter, false, 0);
889cd4b2
SP
2167 if (status)
2168 goto err;
4f2aa89c 2169
ba343c77
SB
2170 status = be_cmd_set_flow_control(adapter,
2171 adapter->tx_fc, adapter->rx_fc);
2172 if (status)
889cd4b2 2173 goto err;
ba343c77 2174 }
4f2aa89c 2175
889cd4b2
SP
2176 return 0;
2177err:
2178 be_close(adapter->netdev);
2179 return -EIO;
5fb379ee
SP
2180}
2181
71d8d1b5
AK
2182static int be_setup_wol(struct be_adapter *adapter, bool enable)
2183{
2184 struct be_dma_mem cmd;
2185 int status = 0;
2186 u8 mac[ETH_ALEN];
2187
2188 memset(mac, 0, ETH_ALEN);
2189
2190 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2191 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2192 GFP_KERNEL);
71d8d1b5
AK
2193 if (cmd.va == NULL)
2194 return -1;
2195 memset(cmd.va, 0, cmd.size);
2196
2197 if (enable) {
2198 status = pci_write_config_dword(adapter->pdev,
2199 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2200 if (status) {
2201 dev_err(&adapter->pdev->dev,
2381a55c 2202 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2203 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2204 cmd.dma);
71d8d1b5
AK
2205 return status;
2206 }
2207 status = be_cmd_enable_magic_wol(adapter,
2208 adapter->netdev->dev_addr, &cmd);
2209 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2210 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2211 } else {
2212 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2213 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2214 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2215 }
2216
2b7bcebf 2217 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2218 return status;
2219}
2220
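/*
 * Editor's note: enabling WoL above is a two-step dance -- arm the firmware
 * for magic packets on the port MAC, then tell the PCI core to keep wake
 * power in D3hot/D3cold. Disabling reverses it by arming a zeroed MAC
 * (which the firmware appears to treat as "off" -- an assumption from the
 * code, not a documented contract) and clearing both wake flags.
 */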
6d87f5c3
AK
2221/*
2222 * Generate a seed MAC address from the PF MAC Address using jhash.
 2223 * MAC addresses for VFs are assigned incrementally starting from the seed.
2224 * These addresses are programmed in the ASIC by the PF and the VF driver
2225 * queries for the MAC address during its probe.
2226 */
2227static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2228{
2229 u32 vf = 0;
3abcdeda 2230 int status = 0;
6d87f5c3
AK
2231 u8 mac[ETH_ALEN];
2232
2233 be_vf_eth_addr_generate(adapter, mac);
2234
2235 for (vf = 0; vf < num_vfs; vf++) {
2236 status = be_cmd_pmac_add(adapter, mac,
2237 adapter->vf_cfg[vf].vf_if_handle,
f8617e08
AK
2238 &adapter->vf_cfg[vf].vf_pmac_id,
2239 vf + 1);
6d87f5c3
AK
2240 if (status)
2241 dev_err(&adapter->pdev->dev,
 2242 "MAC address add failed for VF %d\n", vf);
2243 else
2244 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2245
2246 mac[5] += 1;
2247 }
2248 return status;
2249}
2250
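/*
 * Editor's note: be_vf_eth_addr_generate() is defined elsewhere in the
 * driver. Going by the comment above, one plausible shape -- an
 * illustrative assumption, not the driver's actual code -- is to jhash the
 * PF's permanent MAC and keep the vendor OUI so the seed stays a valid
 * unicast address. (The include would normally sit at the top of the file.)
 */
#include <linux/jhash.h>

static inline void example_vf_mac_seed(struct be_adapter *adapter, u8 *mac)
{
	u32 seed = jhash(adapter->netdev->perm_addr, ETH_ALEN, 0);

	memcpy(mac, adapter->netdev->perm_addr, 3);	/* keep the OUI */
	mac[3] = (seed >> 16) & 0xFF;
	mac[4] = (seed >> 8) & 0xFF;
	mac[5] = seed & 0xFF;
}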
2251static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2252{
2253 u32 vf;
2254
2255 for (vf = 0; vf < num_vfs; vf++) {
2256 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2257 be_cmd_pmac_del(adapter,
2258 adapter->vf_cfg[vf].vf_if_handle,
f8617e08 2259 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
6d87f5c3
AK
2260 }
2261}
2262
5fb379ee
SP
2263static int be_setup(struct be_adapter *adapter)
2264{
5fb379ee 2265 struct net_device *netdev = adapter->netdev;
ba343c77 2266 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2267 int status;
ba343c77
SB
2268 u8 mac[ETH_ALEN];
2269
f21b538c
PR
2270 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2271 BE_IF_FLAGS_BROADCAST |
2272 BE_IF_FLAGS_MULTICAST;
6b7c5b94 2273
ba343c77
SB
2274 if (be_physfn(adapter)) {
2275 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2276 BE_IF_FLAGS_PROMISCUOUS |
2277 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2278 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda 2279
ac6a0c4a 2280 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
3abcdeda
SP
2281 cap_flags |= BE_IF_FLAGS_RSS;
2282 en_flags |= BE_IF_FLAGS_RSS;
2283 }
ba343c77 2284 }
73d540f2
SP
2285
2286 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2287 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2288 &adapter->if_handle, &adapter->pmac_id, 0);
6b7c5b94
SP
2289 if (status != 0)
2290 goto do_none;
2291
ba343c77 2292 if (be_physfn(adapter)) {
c99ac3e7
AK
2293 if (adapter->sriov_enabled) {
2294 while (vf < num_vfs) {
2295 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2296 BE_IF_FLAGS_BROADCAST;
2297 status = be_cmd_if_create(adapter, cap_flags,
2298 en_flags, mac, true,
64600ea5 2299 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77 2300 NULL, vf+1);
c99ac3e7
AK
2301 if (status) {
2302 dev_err(&adapter->pdev->dev,
2303 "Interface Create failed for VF %d\n",
2304 vf);
2305 goto if_destroy;
2306 }
2307 adapter->vf_cfg[vf].vf_pmac_id =
2308 BE_INVALID_PMAC_ID;
2309 vf++;
ba343c77 2310 }
84e5b9f7 2311 }
c99ac3e7 2312 } else {
ba343c77
SB
2313 status = be_cmd_mac_addr_query(adapter, mac,
2314 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2315 if (!status) {
2316 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2317 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2318 }
2319 }
2320
6b7c5b94
SP
2321 status = be_tx_queues_create(adapter);
2322 if (status != 0)
2323 goto if_destroy;
2324
2325 status = be_rx_queues_create(adapter);
2326 if (status != 0)
2327 goto tx_qs_destroy;
2328
5fb379ee
SP
2329 status = be_mcc_queues_create(adapter);
2330 if (status != 0)
2331 goto rx_qs_destroy;
6b7c5b94 2332
0dffc83e
AK
2333 adapter->link_speed = -1;
2334
6b7c5b94
SP
2335 return 0;
2336
5fb379ee
SP
2337rx_qs_destroy:
2338 be_rx_queues_destroy(adapter);
6b7c5b94
SP
2339tx_qs_destroy:
2340 be_tx_queues_destroy(adapter);
2341if_destroy:
c99ac3e7
AK
2342 if (be_physfn(adapter) && adapter->sriov_enabled)
2343 for (vf = 0; vf < num_vfs; vf++)
2344 if (adapter->vf_cfg[vf].vf_if_handle)
2345 be_cmd_if_destroy(adapter,
658681f7
AK
2346 adapter->vf_cfg[vf].vf_if_handle,
2347 vf + 1);
2348 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
6b7c5b94
SP
2349do_none:
2350 return status;
2351}
2352
5fb379ee
SP
2353static int be_clear(struct be_adapter *adapter)
2354{
7ab8b0b4
AK
2355 int vf;
2356
c99ac3e7 2357 if (be_physfn(adapter) && adapter->sriov_enabled)
6d87f5c3
AK
2358 be_vf_eth_addr_rem(adapter);
2359
1a8887d8 2360 be_mcc_queues_destroy(adapter);
5fb379ee
SP
2361 be_rx_queues_destroy(adapter);
2362 be_tx_queues_destroy(adapter);
1f5db833 2363 adapter->eq_next_idx = 0;
5fb379ee 2364
7ab8b0b4
AK
2365 if (be_physfn(adapter) && adapter->sriov_enabled)
2366 for (vf = 0; vf < num_vfs; vf++)
2367 if (adapter->vf_cfg[vf].vf_if_handle)
2368 be_cmd_if_destroy(adapter,
2369 adapter->vf_cfg[vf].vf_if_handle,
2370 vf + 1);
2371
658681f7 2372 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
5fb379ee 2373
2243e2e9
SP
2374 /* tell fw we're done with firing cmds */
2375 be_cmd_fw_clean(adapter);
5fb379ee
SP
2376 return 0;
2377}
2378
6b7c5b94 2379
84517482 2380#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
fa9a6fed 2381static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2382 const u8 *p, u32 img_start, int image_size,
2383 int hdr_size)
fa9a6fed
SB
2384{
2385 u32 crc_offset;
2386 u8 flashed_crc[4];
2387 int status;
3f0d4560
AK
2388
2389 crc_offset = hdr_size + img_start + image_size - 4;
2390
fa9a6fed 2391 p += crc_offset;
3f0d4560
AK
2392
2393 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 2394 (image_size - 4));
fa9a6fed
SB
2395 if (status) {
2396 dev_err(&adapter->pdev->dev,
2397 "could not get crc from flash, not flashing redboot\n");
2398 return false;
2399 }
2400
 2401 /* update redboot only if crc does not match */
2402 if (!memcmp(flashed_crc, p, 4))
2403 return false;
2404 else
2405 return true;
fa9a6fed
SB
2406}
2407
3f0d4560 2408static int be_flash_data(struct be_adapter *adapter,
84517482 2409 const struct firmware *fw,
3f0d4560
AK
2410 struct be_dma_mem *flash_cmd, int num_of_images)
2411
84517482 2412{
3f0d4560
AK
2413 int status = 0, i, filehdr_size = 0;
2414 u32 total_bytes = 0, flash_op;
84517482
AK
2415 int num_bytes;
2416 const u8 *p = fw->data;
2417 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 2418 const struct flash_comp *pflashcomp;
9fe96934 2419 int num_comp;
3f0d4560 2420
215faf9c 2421 static const struct flash_comp gen3_flash_types[9] = {
3f0d4560
AK
2422 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2423 FLASH_IMAGE_MAX_SIZE_g3},
2424 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2425 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2426 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2427 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2428 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2429 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2430 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2431 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2432 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2433 FLASH_IMAGE_MAX_SIZE_g3},
2434 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2435 FLASH_IMAGE_MAX_SIZE_g3},
2436 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
9fe96934
SB
2437 FLASH_IMAGE_MAX_SIZE_g3},
2438 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2439 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
3f0d4560 2440 };
215faf9c 2441 static const struct flash_comp gen2_flash_types[8] = {
3f0d4560
AK
2442 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2443 FLASH_IMAGE_MAX_SIZE_g2},
2444 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2445 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2446 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2447 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2448 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2449 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2450 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2451 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2452 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2453 FLASH_IMAGE_MAX_SIZE_g2},
2454 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2455 FLASH_IMAGE_MAX_SIZE_g2},
2456 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2457 FLASH_IMAGE_MAX_SIZE_g2}
2458 };
2459
2460 if (adapter->generation == BE_GEN3) {
2461 pflashcomp = gen3_flash_types;
2462 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 2463 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
2464 } else {
2465 pflashcomp = gen2_flash_types;
2466 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 2467 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 2468 }
9fe96934
SB
2469 for (i = 0; i < num_comp; i++) {
2470 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2471 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2472 continue;
3f0d4560
AK
2473 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2474 (!be_flash_redboot(adapter, fw->data,
fae21a4d
AK
2475 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2476 (num_of_images * sizeof(struct image_hdr)))))
3f0d4560
AK
2477 continue;
2478 p = fw->data;
2479 p += filehdr_size + pflashcomp[i].offset
2480 + (num_of_images * sizeof(struct image_hdr));
2481 if (p + pflashcomp[i].size > fw->data + fw->size)
84517482 2482 return -1;
3f0d4560
AK
2483 total_bytes = pflashcomp[i].size;
2484 while (total_bytes) {
2485 if (total_bytes > 32*1024)
2486 num_bytes = 32*1024;
2487 else
2488 num_bytes = total_bytes;
2489 total_bytes -= num_bytes;
2490
2491 if (!total_bytes)
2492 flash_op = FLASHROM_OPER_FLASH;
2493 else
2494 flash_op = FLASHROM_OPER_SAVE;
2495 memcpy(req->params.data_buf, p, num_bytes);
2496 p += num_bytes;
2497 status = be_cmd_write_flashrom(adapter, flash_cmd,
2498 pflashcomp[i].optype, flash_op, num_bytes);
2499 if (status) {
2500 dev_err(&adapter->pdev->dev,
2501 "cmd to write to flash rom failed.\n");
2502 return -1;
2503 }
2504 yield();
84517482 2505 }
84517482 2506 }
84517482
AK
2507 return 0;
2508}
2509
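/*
 * Editor's note: the write loop in be_flash_data() streams each image in
 * 32 KB chunks; every intermediate chunk is sent with FLASHROM_OPER_SAVE
 * (staged by firmware) and only the final chunk uses FLASHROM_OPER_FLASH
 * to commit. The same selection, restated as a helper:
 */
static inline u32 example_flash_op_for_chunk(u32 bytes_left_after_chunk)
{
	return bytes_left_after_chunk ? FLASHROM_OPER_SAVE :
					FLASHROM_OPER_FLASH;
}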
3f0d4560
AK
2510static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2511{
2512 if (fhdr == NULL)
2513 return 0;
2514 if (fhdr->build[0] == '3')
2515 return BE_GEN3;
2516 else if (fhdr->build[0] == '2')
2517 return BE_GEN2;
2518 else
2519 return 0;
2520}
2521
84517482
AK
2522int be_load_fw(struct be_adapter *adapter, u8 *func)
2523{
2524 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2525 const struct firmware *fw;
3f0d4560
AK
2526 struct flash_file_hdr_g2 *fhdr;
2527 struct flash_file_hdr_g3 *fhdr3;
2528 struct image_hdr *img_hdr_ptr = NULL;
84517482 2529 struct be_dma_mem flash_cmd;
8b93b710 2530 int status, i = 0, num_imgs = 0;
84517482 2531 const u8 *p;
84517482 2532
d9efd2af
SB
2533 if (!netif_running(adapter->netdev)) {
2534 dev_err(&adapter->pdev->dev,
2535 "Firmware load not allowed (interface is down)\n");
2536 return -EPERM;
2537 }
2538
84517482
AK
2539 strcpy(fw_file, func);
2540
2541 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2542 if (status)
2543 goto fw_exit;
2544
2545 p = fw->data;
3f0d4560 2546 fhdr = (struct flash_file_hdr_g2 *) p;
84517482
AK
2547 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2548
84517482 2549 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2b7bcebf
IV
2550 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2551 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
2552 if (!flash_cmd.va) {
2553 status = -ENOMEM;
2554 dev_err(&adapter->pdev->dev,
2555 "Memory allocation failure while flashing\n");
2556 goto fw_exit;
2557 }
2558
3f0d4560
AK
2559 if ((adapter->generation == BE_GEN3) &&
2560 (get_ufigen_type(fhdr) == BE_GEN3)) {
2561 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
8b93b710
AK
2562 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2563 for (i = 0; i < num_imgs; i++) {
3f0d4560
AK
2564 img_hdr_ptr = (struct image_hdr *) (fw->data +
2565 (sizeof(struct flash_file_hdr_g3) +
8b93b710
AK
2566 i * sizeof(struct image_hdr)));
2567 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2568 status = be_flash_data(adapter, fw, &flash_cmd,
2569 num_imgs);
3f0d4560
AK
2570 }
2571 } else if ((adapter->generation == BE_GEN2) &&
2572 (get_ufigen_type(fhdr) == BE_GEN2)) {
2573 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2574 } else {
2575 dev_err(&adapter->pdev->dev,
2576 "UFI and Interface are not compatible for flashing\n");
2577 status = -1;
84517482
AK
2578 }
2579
2b7bcebf
IV
2580 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2581 flash_cmd.dma);
84517482
AK
2582 if (status) {
2583 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2584 goto fw_exit;
2585 }
2586
af901ca1 2587 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482
AK
2588
2589fw_exit:
2590 release_firmware(fw);
2591 return status;
2592}
2593
6b7c5b94
SP
2594static struct net_device_ops be_netdev_ops = {
2595 .ndo_open = be_open,
2596 .ndo_stop = be_close,
2597 .ndo_start_xmit = be_xmit,
6b7c5b94
SP
2598 .ndo_set_rx_mode = be_set_multicast_list,
2599 .ndo_set_mac_address = be_mac_addr_set,
2600 .ndo_change_mtu = be_change_mtu,
2601 .ndo_validate_addr = eth_validate_addr,
2602 .ndo_vlan_rx_register = be_vlan_register,
2603 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2604 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 2605 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 2606 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 2607 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
64600ea5 2608 .ndo_get_vf_config = be_get_vf_config
6b7c5b94
SP
2609};
2610
2611static void be_netdev_init(struct net_device *netdev)
2612{
2613 struct be_adapter *adapter = netdev_priv(netdev);
3abcdeda
SP
2614 struct be_rx_obj *rxo;
2615 int i;
6b7c5b94
SP
2616
2617 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
79032644
MM
2618 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2619 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
49e4b847 2620 NETIF_F_GRO | NETIF_F_TSO6;
6b7c5b94 2621
4b972914
AK
2622 if (be_multi_rxq(adapter))
2623 netdev->features |= NETIF_F_RXHASH;
2624
79032644
MM
2625 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2626 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 2627
fe6d2a38
SP
2628 if (lancer_chip(adapter))
2629 netdev->vlan_features |= NETIF_F_TSO6;
2630
6b7c5b94
SP
2631 netdev->flags |= IFF_MULTICAST;
2632
728a9972
AK
2633 adapter->rx_csum = true;
2634
9e90c961
AK
2635 /* Default settings for Rx and Tx flow control */
2636 adapter->rx_fc = true;
2637 adapter->tx_fc = true;
2638
c190e3c8
AK
2639 netif_set_gso_max_size(netdev, 65535);
2640
6b7c5b94
SP
2641 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2642
2643 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2644
3abcdeda
SP
2645 for_all_rx_queues(adapter, rxo, i)
2646 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2647 BE_NAPI_WEIGHT);
2648
5fb379ee 2649 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
6b7c5b94 2650 BE_NAPI_WEIGHT);
6b7c5b94
SP
2651}
2652
2653static void be_unmap_pci_bars(struct be_adapter *adapter)
2654{
8788fdc2
SP
2655 if (adapter->csr)
2656 iounmap(adapter->csr);
2657 if (adapter->db)
2658 iounmap(adapter->db);
ba343c77 2659 if (adapter->pcicfg && be_physfn(adapter))
8788fdc2 2660 iounmap(adapter->pcicfg);
6b7c5b94
SP
2661}
2662
2663static int be_map_pci_bars(struct be_adapter *adapter)
2664{
2665 u8 __iomem *addr;
ba343c77 2666 int pcicfg_reg, db_reg;
6b7c5b94 2667
fe6d2a38
SP
2668 if (lancer_chip(adapter)) {
2669 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2670 pci_resource_len(adapter->pdev, 0));
2671 if (addr == NULL)
2672 return -ENOMEM;
2673 adapter->db = addr;
2674 return 0;
2675 }
2676
ba343c77
SB
2677 if (be_physfn(adapter)) {
2678 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2679 pci_resource_len(adapter->pdev, 2));
2680 if (addr == NULL)
2681 return -ENOMEM;
2682 adapter->csr = addr;
2683 }
6b7c5b94 2684
ba343c77 2685 if (adapter->generation == BE_GEN2) {
7b139c83 2686 pcicfg_reg = 1;
ba343c77
SB
2687 db_reg = 4;
2688 } else {
7b139c83 2689 pcicfg_reg = 0;
ba343c77
SB
2690 if (be_physfn(adapter))
2691 db_reg = 4;
2692 else
2693 db_reg = 0;
2694 }
2695 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2696 pci_resource_len(adapter->pdev, db_reg));
6b7c5b94
SP
2697 if (addr == NULL)
2698 goto pci_map_err;
ba343c77
SB
2699 adapter->db = addr;
2700
2701 if (be_physfn(adapter)) {
2702 addr = ioremap_nocache(
2703 pci_resource_start(adapter->pdev, pcicfg_reg),
2704 pci_resource_len(adapter->pdev, pcicfg_reg));
2705 if (addr == NULL)
2706 goto pci_map_err;
2707 adapter->pcicfg = addr;
2708 } else
2709 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
6b7c5b94
SP
2710
2711 return 0;
2712pci_map_err:
2713 be_unmap_pci_bars(adapter);
2714 return -ENOMEM;
2715}
2716
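/*
 * Editor's note on the BAR selection above (as encoded in the code, not
 * taken from a datasheet): GEN2 maps the PCI config shadow from BAR1 and
 * doorbells from BAR4; GEN3 PFs use BAR0 for the config shadow and BAR4
 * for doorbells; GEN3 VFs take doorbells from BAR0 and reach the config
 * shadow at SRIOV_VF_PCICFG_OFFSET inside the same mapping. Lancer exposes
 * only a doorbell BAR (BAR0).
 */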
2717
2718static void be_ctrl_cleanup(struct be_adapter *adapter)
2719{
8788fdc2 2720 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
2721
2722 be_unmap_pci_bars(adapter);
2723
2724 if (mem->va)
2b7bcebf
IV
2725 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2726 mem->dma);
e7b909a6
SP
2727
2728 mem = &adapter->mc_cmd_mem;
2729 if (mem->va)
2b7bcebf
IV
2730 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2731 mem->dma);
6b7c5b94
SP
2732}
2733
6b7c5b94
SP
2734static int be_ctrl_init(struct be_adapter *adapter)
2735{
8788fdc2
SP
2736 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2737 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
e7b909a6 2738 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
6b7c5b94 2739 int status;
6b7c5b94
SP
2740
2741 status = be_map_pci_bars(adapter);
2742 if (status)
e7b909a6 2743 goto done;
6b7c5b94
SP
2744
2745 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
2746 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2747 mbox_mem_alloc->size,
2748 &mbox_mem_alloc->dma,
2749 GFP_KERNEL);
6b7c5b94 2750 if (!mbox_mem_alloc->va) {
e7b909a6
SP
2751 status = -ENOMEM;
2752 goto unmap_pci_bars;
6b7c5b94 2753 }
e7b909a6 2754
6b7c5b94
SP
2755 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2756 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2757 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2758 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6
SP
2759
2760 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2b7bcebf
IV
2761 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2762 mc_cmd_mem->size, &mc_cmd_mem->dma,
2763 GFP_KERNEL);
e7b909a6
SP
2764 if (mc_cmd_mem->va == NULL) {
2765 status = -ENOMEM;
2766 goto free_mbox;
2767 }
2768 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2769
2984961c 2770 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
2771 spin_lock_init(&adapter->mcc_lock);
2772 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 2773
dd131e76 2774 init_completion(&adapter->flash_compl);
cf588477 2775 pci_save_state(adapter->pdev);
6b7c5b94 2776 return 0;
e7b909a6
SP
2777
2778free_mbox:
2b7bcebf
IV
2779 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2780 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
2781
2782unmap_pci_bars:
2783 be_unmap_pci_bars(adapter);
2784
2785done:
2786 return status;
6b7c5b94
SP
2787}
2788
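/*
 * Editor's note: the mailbox must be 16-byte aligned (presumably a hardware
 * requirement -- assumption), so be_ctrl_init() over-allocates by 16 bytes
 * and aligns both the CPU and DMA addresses with PTR_ALIGN. The pattern in
 * isolation:
 */
static inline void *example_align16(void *raw)
{
	/* raw buffer was allocated with sizeof(payload) + 16 spare bytes */
	return PTR_ALIGN(raw, 16);
}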
2789static void be_stats_cleanup(struct be_adapter *adapter)
2790{
3abcdeda 2791 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
2792
2793 if (cmd->va)
2b7bcebf
IV
2794 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2795 cmd->va, cmd->dma);
6b7c5b94
SP
2796}
2797
2798static int be_stats_init(struct be_adapter *adapter)
2799{
3abcdeda 2800 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
2801
2802 cmd->size = sizeof(struct be_cmd_req_get_stats);
2b7bcebf
IV
2803 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2804 GFP_KERNEL);
6b7c5b94
SP
2805 if (cmd->va == NULL)
2806 return -1;
d291b9af 2807 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
2808 return 0;
2809}
2810
2811static void __devexit be_remove(struct pci_dev *pdev)
2812{
2813 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 2814
6b7c5b94
SP
2815 if (!adapter)
2816 return;
2817
f203af70
SK
2818 cancel_delayed_work_sync(&adapter->work);
2819
6b7c5b94
SP
2820 unregister_netdev(adapter->netdev);
2821
5fb379ee
SP
2822 be_clear(adapter);
2823
6b7c5b94
SP
2824 be_stats_cleanup(adapter);
2825
2826 be_ctrl_cleanup(adapter);
2827
ba343c77
SB
2828 be_sriov_disable(adapter);
2829
8d56ff11 2830 be_msix_disable(adapter);
6b7c5b94
SP
2831
2832 pci_set_drvdata(pdev, NULL);
2833 pci_release_regions(pdev);
2834 pci_disable_device(pdev);
2835
2836 free_netdev(adapter->netdev);
2837}
2838
2243e2e9 2839static int be_get_config(struct be_adapter *adapter)
6b7c5b94 2840{
6b7c5b94 2841 int status;
2243e2e9 2842 u8 mac[ETH_ALEN];
6b7c5b94 2843
2243e2e9 2844 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
6b7c5b94
SP
2845 if (status)
2846 return status;
2847
3abcdeda
SP
2848 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2849 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
2850 if (status)
2851 return status;
2852
2243e2e9 2853 memset(mac, 0, ETH_ALEN);
ba343c77
SB
2854
2855 if (be_physfn(adapter)) {
2856 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 2857 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
ca9e4988 2858
ba343c77
SB
2859 if (status)
2860 return status;
ca9e4988 2861
ba343c77
SB
2862 if (!is_valid_ether_addr(mac))
2863 return -EADDRNOTAVAIL;
2864
2865 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2866 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2867 }
6b7c5b94 2868
3486be29 2869 if (adapter->function_mode & 0x400)
82903e4b
AK
2870 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2871 else
2872 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2873
9e1453c5
AK
2874 status = be_cmd_get_cntl_attributes(adapter);
2875 if (status)
2876 return status;
2877
2e588f84 2878 be_cmd_check_native_mode(adapter);
2243e2e9 2879 return 0;
6b7c5b94
SP
2880}
2881
fe6d2a38
SP
2882static int be_dev_family_check(struct be_adapter *adapter)
2883{
2884 struct pci_dev *pdev = adapter->pdev;
2885 u32 sli_intf = 0, if_type;
2886
2887 switch (pdev->device) {
2888 case BE_DEVICE_ID1:
2889 case OC_DEVICE_ID1:
2890 adapter->generation = BE_GEN2;
2891 break;
2892 case BE_DEVICE_ID2:
2893 case OC_DEVICE_ID2:
2894 adapter->generation = BE_GEN3;
2895 break;
2896 case OC_DEVICE_ID3:
2897 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2898 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2899 SLI_INTF_IF_TYPE_SHIFT;
2900
2901 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2902 if_type != 0x02) {
2903 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2904 return -EINVAL;
2905 }
2906 if (num_vfs > 0) {
2907 dev_err(&pdev->dev, "VFs not supported\n");
2908 return -EINVAL;
2909 }
2910 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2911 SLI_INTF_FAMILY_SHIFT);
2912 adapter->generation = BE_GEN3;
2913 break;
2914 default:
2915 adapter->generation = 0;
2916 }
2917 return 0;
2918}
2919
37eed1cb
PR
2920static int lancer_wait_ready(struct be_adapter *adapter)
2921{
2922#define SLIPORT_READY_TIMEOUT 500
2923 u32 sliport_status;
2924 int status = 0, i;
2925
2926 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
2927 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2928 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
2929 break;
2930
2931 msleep(20);
2932 }
2933
2934 if (i == SLIPORT_READY_TIMEOUT)
2935 status = -1;
2936
2937 return status;
2938}
2939
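/*
 * Editor's note: lancer_wait_ready() above is a bounded poll -- up to 500
 * iterations of a 20 ms sleep (roughly 10 s) waiting for the RDY bit.
 * Generic shape of the idiom, with illustrative names:
 */
static int example_poll_bit(void __iomem *reg, u32 mask, int tries, int ms)
{
	while (tries--) {
		if (ioread32(reg) & mask)
			return 0;
		msleep(ms);
	}
	return -1;		/* timed out */
}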
2940static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
2941{
2942 int status;
2943 u32 sliport_status, err, reset_needed;
2944 status = lancer_wait_ready(adapter);
2945 if (!status) {
2946 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2947 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
2948 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
2949 if (err && reset_needed) {
2950 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2951 adapter->db + SLIPORT_CONTROL_OFFSET);
2952
2953 /* check adapter has corrected the error */
2954 status = lancer_wait_ready(adapter);
2955 sliport_status = ioread32(adapter->db +
2956 SLIPORT_STATUS_OFFSET);
2957 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
2958 SLIPORT_STATUS_RN_MASK);
2959 if (status || sliport_status)
2960 status = -1;
2961 } else if (err || reset_needed) {
2962 status = -1;
2963 }
2964 }
2965 return status;
2966}
2967
6b7c5b94
SP
2968static int __devinit be_probe(struct pci_dev *pdev,
2969 const struct pci_device_id *pdev_id)
2970{
2971 int status = 0;
2972 struct be_adapter *adapter;
2973 struct net_device *netdev;
6b7c5b94
SP
2974
2975 status = pci_enable_device(pdev);
2976 if (status)
2977 goto do_none;
2978
2979 status = pci_request_regions(pdev, DRV_NAME);
2980 if (status)
2981 goto disable_dev;
2982 pci_set_master(pdev);
2983
2984 netdev = alloc_etherdev(sizeof(struct be_adapter));
2985 if (netdev == NULL) {
2986 status = -ENOMEM;
2987 goto rel_reg;
2988 }
2989 adapter = netdev_priv(netdev);
2990 adapter->pdev = pdev;
2991 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
2992
2993 status = be_dev_family_check(adapter);
63657b9c 2994 if (status)
fe6d2a38
SP
2995 goto free_netdev;
2996
6b7c5b94 2997 adapter->netdev = netdev;
2243e2e9 2998 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 2999
2b7bcebf 3000 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3001 if (!status) {
3002 netdev->features |= NETIF_F_HIGHDMA;
3003 } else {
2b7bcebf 3004 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3005 if (status) {
3006 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3007 goto free_netdev;
3008 }
3009 }
3010
ba343c77
SB
3011 be_sriov_enable(adapter);
3012
6b7c5b94
SP
3013 status = be_ctrl_init(adapter);
3014 if (status)
3015 goto free_netdev;
3016
37eed1cb
PR
3017 if (lancer_chip(adapter)) {
3018 status = lancer_test_and_set_rdy_state(adapter);
3019 if (status) {
 3020 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3021 goto free_netdev;
3022 }
3023 }
3024
2243e2e9 3025 /* sync up with fw's ready state */
ba343c77
SB
3026 if (be_physfn(adapter)) {
3027 status = be_cmd_POST(adapter);
3028 if (status)
3029 goto ctrl_clean;
ba343c77 3030 }
6b7c5b94 3031
2243e2e9
SP
3032 /* tell fw we're ready to fire cmds */
3033 status = be_cmd_fw_init(adapter);
6b7c5b94 3034 if (status)
2243e2e9
SP
3035 goto ctrl_clean;
3036
a4b4dfab
AK
3037 status = be_cmd_reset_function(adapter);
3038 if (status)
3039 goto ctrl_clean;
556ae191 3040
2243e2e9
SP
3041 status = be_stats_init(adapter);
3042 if (status)
3043 goto ctrl_clean;
3044
3045 status = be_get_config(adapter);
6b7c5b94
SP
3046 if (status)
3047 goto stats_clean;
6b7c5b94 3048
3abcdeda
SP
3049 be_msix_enable(adapter);
3050
6b7c5b94 3051 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 3052
5fb379ee
SP
3053 status = be_setup(adapter);
3054 if (status)
3abcdeda 3055 goto msix_disable;
2243e2e9 3056
3abcdeda 3057 be_netdev_init(netdev);
6b7c5b94
SP
3058 status = register_netdev(netdev);
3059 if (status != 0)
5fb379ee 3060 goto unsetup;
63a76944 3061 netif_carrier_off(netdev);
6b7c5b94 3062
e6319365
AK
3063 if (be_physfn(adapter) && adapter->sriov_enabled) {
3064 status = be_vf_eth_addr_config(adapter);
3065 if (status)
3066 goto unreg_netdev;
3067 }
3068
c4ca2374 3069 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
f203af70 3070 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3071 return 0;
3072
e6319365
AK
3073unreg_netdev:
3074 unregister_netdev(netdev);
5fb379ee
SP
3075unsetup:
3076 be_clear(adapter);
3abcdeda
SP
3077msix_disable:
3078 be_msix_disable(adapter);
6b7c5b94
SP
3079stats_clean:
3080 be_stats_cleanup(adapter);
3081ctrl_clean:
3082 be_ctrl_cleanup(adapter);
3083free_netdev:
ba343c77 3084 be_sriov_disable(adapter);
fe6d2a38 3085 free_netdev(netdev);
8d56ff11 3086 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3087rel_reg:
3088 pci_release_regions(pdev);
3089disable_dev:
3090 pci_disable_device(pdev);
3091do_none:
c4ca2374 3092 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3093 return status;
3094}
3095
3096static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3097{
3098 struct be_adapter *adapter = pci_get_drvdata(pdev);
3099 struct net_device *netdev = adapter->netdev;
3100
a4ca055f 3101 cancel_delayed_work_sync(&adapter->work);
71d8d1b5
AK
3102 if (adapter->wol)
3103 be_setup_wol(adapter, true);
3104
6b7c5b94
SP
3105 netif_device_detach(netdev);
3106 if (netif_running(netdev)) {
3107 rtnl_lock();
3108 be_close(netdev);
3109 rtnl_unlock();
3110 }
9e90c961 3111 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
9b0365f1 3112 be_clear(adapter);
6b7c5b94 3113
a4ca055f 3114 be_msix_disable(adapter);
6b7c5b94
SP
3115 pci_save_state(pdev);
3116 pci_disable_device(pdev);
3117 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3118 return 0;
3119}
3120
3121static int be_resume(struct pci_dev *pdev)
3122{
3123 int status = 0;
3124 struct be_adapter *adapter = pci_get_drvdata(pdev);
3125 struct net_device *netdev = adapter->netdev;
3126
3127 netif_device_detach(netdev);
3128
3129 status = pci_enable_device(pdev);
3130 if (status)
3131 return status;
3132
3133 pci_set_power_state(pdev, 0);
3134 pci_restore_state(pdev);
3135
a4ca055f 3136 be_msix_enable(adapter);
2243e2e9
SP
3137 /* tell fw we're ready to fire cmds */
3138 status = be_cmd_fw_init(adapter);
3139 if (status)
3140 return status;
3141
9b0365f1 3142 be_setup(adapter);
6b7c5b94
SP
3143 if (netif_running(netdev)) {
3144 rtnl_lock();
3145 be_open(netdev);
3146 rtnl_unlock();
3147 }
3148 netif_device_attach(netdev);
71d8d1b5
AK
3149
3150 if (adapter->wol)
3151 be_setup_wol(adapter, false);
a4ca055f
AK
3152
3153 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3154 return 0;
3155}
3156
82456b03
SP
3157/*
3158 * An FLR will stop BE from DMAing any data.
3159 */
3160static void be_shutdown(struct pci_dev *pdev)
3161{
3162 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3163
2d5d4154
AK
3164 if (!adapter)
3165 return;
82456b03 3166
0f4a6828 3167 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3168
2d5d4154 3169 netif_device_detach(adapter->netdev);
82456b03
SP
3170
3171 be_cmd_reset_function(adapter);
3172
3173 if (adapter->wol)
3174 be_setup_wol(adapter, true);
3175
3176 pci_disable_device(pdev);
82456b03
SP
3177}
3178
cf588477
SP
3179static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3180 pci_channel_state_t state)
3181{
3182 struct be_adapter *adapter = pci_get_drvdata(pdev);
3183 struct net_device *netdev = adapter->netdev;
3184
3185 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3186
3187 adapter->eeh_err = true;
3188
3189 netif_device_detach(netdev);
3190
3191 if (netif_running(netdev)) {
3192 rtnl_lock();
3193 be_close(netdev);
3194 rtnl_unlock();
3195 }
3196 be_clear(adapter);
3197
3198 if (state == pci_channel_io_perm_failure)
3199 return PCI_ERS_RESULT_DISCONNECT;
3200
3201 pci_disable_device(pdev);
3202
3203 return PCI_ERS_RESULT_NEED_RESET;
3204}
3205
3206static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3207{
3208 struct be_adapter *adapter = pci_get_drvdata(pdev);
3209 int status;
3210
3211 dev_info(&adapter->pdev->dev, "EEH reset\n");
3212 adapter->eeh_err = false;
3213
3214 status = pci_enable_device(pdev);
3215 if (status)
3216 return PCI_ERS_RESULT_DISCONNECT;
3217
3218 pci_set_master(pdev);
3219 pci_set_power_state(pdev, 0);
3220 pci_restore_state(pdev);
3221
3222 /* Check if card is ok and fw is ready */
3223 status = be_cmd_POST(adapter);
3224 if (status)
3225 return PCI_ERS_RESULT_DISCONNECT;
3226
3227 return PCI_ERS_RESULT_RECOVERED;
3228}
3229
3230static void be_eeh_resume(struct pci_dev *pdev)
3231{
3232 int status = 0;
3233 struct be_adapter *adapter = pci_get_drvdata(pdev);
3234 struct net_device *netdev = adapter->netdev;
3235
3236 dev_info(&adapter->pdev->dev, "EEH resume\n");
3237
3238 pci_save_state(pdev);
3239
3240 /* tell fw we're ready to fire cmds */
3241 status = be_cmd_fw_init(adapter);
3242 if (status)
3243 goto err;
3244
3245 status = be_setup(adapter);
3246 if (status)
3247 goto err;
3248
3249 if (netif_running(netdev)) {
3250 status = be_open(netdev);
3251 if (status)
3252 goto err;
3253 }
3254 netif_device_attach(netdev);
3255 return;
3256err:
3257 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3258}
3259
3260static struct pci_error_handlers be_eeh_handlers = {
3261 .error_detected = be_eeh_err_detected,
3262 .slot_reset = be_eeh_reset,
3263 .resume = be_eeh_resume,
3264};
3265
6b7c5b94
SP
3266static struct pci_driver be_driver = {
3267 .name = DRV_NAME,
3268 .id_table = be_dev_ids,
3269 .probe = be_probe,
3270 .remove = be_remove,
3271 .suspend = be_suspend,
cf588477 3272 .resume = be_resume,
82456b03 3273 .shutdown = be_shutdown,
cf588477 3274 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3275};
3276
3277static int __init be_init_module(void)
3278{
8e95a202
JP
3279 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3280 rx_frag_size != 2048) {
6b7c5b94
SP
3281 printk(KERN_WARNING DRV_NAME
3282 " : Module param rx_frag_size must be 2048/4096/8192."
3283 " Using 2048\n");
3284 rx_frag_size = 2048;
3285 }
6b7c5b94 3286
ba343c77
SB
3287 if (num_vfs > 32) {
3288 printk(KERN_WARNING DRV_NAME
3289 " : Module param num_vfs must not be greater than 32."
 3290 " Using 32\n");
3291 num_vfs = 32;
3292 }
3293
6b7c5b94
SP
3294 return pci_register_driver(&be_driver);
3295}
3296module_init(be_init_module);
3297
3298static void __exit be_exit_module(void)
3299{
3300 pci_unregister_driver(&be_driver);
3301}
3302module_exit(be_exit_module);