be2net: dynamically allocate adapter->vf_cfg
drivers/net/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

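/* Generic queue helpers. As I read it, every BE ring (EQ/CQ/TXQ/RXQ/MCCQ)
 * lives in a single DMA-coherent allocation described by be_dma_mem.
 */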
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

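/* Doorbell helpers. Each queue type has its own doorbell offset
 * (DB_RQ/DB_TXULP1/DB_EQ/DB_CQ); the wmb() in the RQ/TXQ notifiers is,
 * presumably, there to make the posted queue entries visible to the HW
 * before the doorbell write hands them over.
 */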
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
			port_stats->rx_input_fifo_overflow +
			rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
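	/* Heuristic: map the measured frags/sec to an EQ delay value and
	 * clamp it to the [min_eqd, max_eqd] window; rates low enough to
	 * yield eqd < 10 disable the delay entirely, so an idle link is
	 * not penalized with extra interrupt latency.
	 */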
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

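/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec. */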
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
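	/* A dummy wrb (added only to pad the count to an even number) has
	 * frag_len == 0 and nothing mapped, so it is skipped here.
	 */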
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
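
	/* A fragment failed to map: rewind the queue head and unmap whatever
	 * was mapped so far (the skb head via dma_map_single, the page frags
	 * via dma_map_page).
	 */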
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}

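/* Parse a v1 (BE3 native mode) Rx completion into the chip-independent
 * be_rx_compl_info; be_parse_rx_compl_v0 below handles the older format.
 * The caller picks between them based on adapter->be3_native.
 */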
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					  compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					  compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
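		/* (0x400 appears to be the multi-channel/FLEX10
		 * function-mode bit) */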
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vid = swab16(rxcp->vid);

		if ((adapter->pvid == rxcp->vid) &&
			!adapter->vlan_tag[rxcp->vid])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

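	/* Several frags share one "big" page: each frag takes a page ref
	 * via get_page(), and only the frag marked last_page_user triggers
	 * the DMA unmap in get_rx_page_info().
	 */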
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

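/* INTx handler. Lancer has no CEV_ISR bitmap, so each EQ is peeked for
 * pending entries; BE2/BE3 read CEV_ISR0 and match the per-EQ eq_idx bits.
 */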
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1779
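/*
 * Editorial note, not part of the original source: the re-arm decision
 * above is the standard NAPI contract. With budget == 64, reaping, say,
 * 30 completions means the ring drained, so napi_complete() runs and the
 * CQ is re-armed (rearm == true); reaping the full 64 leaves work
 * potentially pending, so the CQ is acked without re-arming and NAPI
 * polls again.
 */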
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour the budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up the netdev queue
		 * if it was stopped due to a lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

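/*
 * Editorial note, not part of the original source: returning 1 (< budget)
 * after napi_complete() means this poller always reports itself done in a
 * single pass; that is safe here only because the loops above drain the
 * TX and MCC CQs completely rather than honouring the budget.
 */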
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

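/*
 * Editorial example, not part of the original source: after masking, the
 * status words are walked bit by bit, one log line per set bit. E.g. a
 * hypothetical masked ue_status_lo of 0x9 (bits 0 and 3) produces exactly
 * two "UE: %s bit set" messages, named from the low-word descriptor table.
 */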
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* When interrupts are not yet enabled, just reap any pending
	 * MCC completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

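/*
 * Editorial note, not part of the original source: be_worker() re-queues
 * itself every 1000 ms, so UE detection, stats refresh, EQD adaptation and
 * starvation recovery all run on a one-second heartbeat. Starved rings can
 * be refilled with GFP_KERNEL here because the worker runs in process
 * context, unlike the GFP_ATOMIC refill in the NAPI poll path above.
 */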
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

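/*
 * Editorial note, not part of the original source: with this era's
 * pci_enable_msix() API, a positive return value is the number of vectors
 * the system could actually grant. E.g. asking for 5 (4 RX + 1 TX) and
 * getting back 3 triggers the second call with num_vec = 3, degrading
 * gracefully instead of abandoning MSI-X altogether.
 */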
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports only %d VFs, not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = !status;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

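/*
 * Editorial example, not part of the original source: the num_vfs module
 * parameter is capped against the TotalVFs field of the SR-IOV capability.
 * E.g. loading with num_vfs=16 on a part that advertises TotalVFs == 8
 * logs the notice above and enables only 8 VFs.
 */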
static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

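/*
 * Editorial note, not part of the original source: the unwind above frees
 * only vectors that were successfully requested, walking the RX IRQs back
 * in reverse. E.g. a failure on "rxq2" frees the TX vector plus rxq1 and
 * rxq0, then disables MSI-X so be_irq_register() can fall back to INTx on
 * the PF.
 */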
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

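/*
 * Editorial note, not part of the original source: be_open() enables NAPI
 * and IRQs before arming the EQs/CQs, and any failure after interrupts are
 * live funnels through the err label into be_close(), so a half-opened
 * device is unwound by the same single teardown path used for a normal
 * stop.
 */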
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from
 * the seed. These addresses are programmed in the ASIC by the PF and
 * the VF driver queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

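/*
 * Editorial example, not part of the original source: with a hypothetical
 * seed of 02:00:00:10:20:30, VF0 is given ...:30, VF1 ...:31 and so on,
 * since only mac[5] is bumped. Being u8 arithmetic, the increment wraps
 * within the last octet (0xff -> 0x00) instead of carrying into mac[4];
 * the addresses stay distinct as long as num_vfs is below 256.
 */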
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false /* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

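/*
 * Editorial note, not part of the original source: be_setup() builds state
 * in the order interface(s) -> TX queues -> RX queues -> MCC queues, and
 * the goto ladder above is its exact mirror, so each failure label tears
 * down only what was created before it; be_clear() repeats the same
 * teardown for the fully constructed case.
 */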
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

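/*
 * Editorial example, not part of the original source: each component is
 * streamed to the flash in 32 KB chunks, every chunk but the last sent
 * with FLASHROM_OPER_SAVE and the final one with FLASHROM_OPER_FLASH,
 * which commits the image. E.g. a hypothetical 100 KB component goes out
 * as SAVE(32K), SAVE(32K), SAVE(32K), FLASH(4K).
 */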
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	if (be_multi_rxq(adapter))
		netdev->features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

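/*
 * Editorial summary, not part of the original source: BAR usage differs by
 * family. Lancer maps only a doorbell BAR (0); GEN2 uses BAR 2 for CSR,
 * BAR 4 for doorbells and BAR 1 for pcicfg; GEN3 keeps CSR/doorbells the
 * same on the PF but takes pcicfg from BAR 0, while a VF reads its pcicfg
 * window at a fixed offset (SRIOV_VF_PCICFG_OFFSET) inside the doorbell
 * BAR.
 */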
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

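/*
 * Editorial example, not part of the original source: the mailbox is
 * allocated 16 bytes oversized so PTR_ALIGN can round both the CPU and DMA
 * addresses up to a 16-byte boundary, e.g. a returned dma of 0x...1238 is
 * used as 0x...1240. The hardware sees only the aligned view; the original
 * pointer is what dma_free_coherent() gets back.
 */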
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

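/*
 * Editorial note, not part of the original source: lancer_wait_ready()
 * polls the SLIPORT status register every 20 ms for up to 500 iterations,
 * i.e. roughly a 10-second budget for the port to report ready. The
 * test-and-set variant additionally writes what appears to be an
 * initiate-port-reset request when both the error and reset-needed bits
 * are set, then re-runs the same poll.
 */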
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg) {
			status = -ENOMEM;
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

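/*
 * Editorial note, not part of the original source: suspend and resume are
 * near-mirrors. Suspend snapshots the flow-control settings, tears down
 * through be_clear() and optionally arms Wake-on-LAN; resume re-runs
 * be_cmd_fw_init()/be_setup() from scratch and disarms WoL. The nic is
 * reconstructed rather than restored, which relies on be_setup() being
 * safe to run again on a cleared adapter.
 */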
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

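/*
 * Editorial note, not part of the original source: the EEH flow is
 * detect -> slot_reset -> resume. error_detected quiesces and clears the
 * adapter, slot_reset re-enables the device and re-POSTs the firmware,
 * and resume rebuilds the queues and reopens the interface, mirroring the
 * probe-time bring-up.
 */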
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

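/*
 * Editorial usage example, not part of the original source: the module
 * parameters validated above are given at load time, e.g.
 *
 *     modprobe be2net rx_frag_size=4096 num_vfs=4
 *
 * An unsupported rx_frag_size (anything but 2048/4096/8192) triggers the
 * warning above and is silently reset to 2048 before the driver registers.
 */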
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);