vlan: Don't check for vlan group before vlan_tx_tag_present.
[deliverable/linux.git] drivers/net/benet/be_main.c
6b7c5b94 1/*
294aedcf 2 * Copyright (C) 2005 - 2010 ServerEngines
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
8788fdc2 19#include "be_cmds.h"
65f71b8b 20#include <asm/div64.h>
6b7c5b94
SP
21
22MODULE_VERSION(DRV_VER);
23MODULE_DEVICE_TABLE(pci, be_dev_ids);
24MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL");
27
28static unsigned int rx_frag_size = 2048;
ba343c77 29static unsigned int num_vfs;
6b7c5b94 30module_param(rx_frag_size, uint, S_IRUGO);
ba343c77 31module_param(num_vfs, uint, S_IRUGO);
6b7c5b94 32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
ba343c77 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 34
3abcdeda
SP
35static bool multi_rxq = true;
36module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
6b7c5b94 39static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
6b7c5b94
SP
44 { 0 }
45};
46MODULE_DEVICE_TABLE(pci, be_dev_ids);
7c185276
AK
47/* UE Status Low CSR */
48static char *ue_status_low_desc[] = {
49 "CEV",
50 "CTX",
51 "DBUF",
52 "ERX",
53 "Host",
54 "MPU",
55 "NDMA",
56 "PTC ",
57 "RDMA ",
58 "RXF ",
59 "RXIPS ",
60 "RXULP0 ",
61 "RXULP1 ",
62 "RXULP2 ",
63 "TIM ",
64 "TPOST ",
65 "TPRE ",
66 "TXIPS ",
67 "TXULP0 ",
68 "TXULP1 ",
69 "UC ",
70 "WDMA ",
71 "TXULP2 ",
72 "HOST1 ",
73 "P0_OB_LINK ",
74 "P1_OB_LINK ",
75 "HOST_GPIO ",
76 "MBOX ",
77 "AXGMAC0",
78 "AXGMAC1",
79 "JTAG",
80 "MPU_INTPEND"
81};
82/* UE Status High CSR */
83static char *ue_status_hi_desc[] = {
84 "LPCMEMHOST",
85 "MGMT_MAC",
86 "PCS0ONLINE",
87 "MPU_IRAM",
88 "PCS1ONLINE",
89 "PCTL0",
90 "PCTL1",
91 "PMEM",
92 "RR",
93 "TXPB",
94 "RXPP",
95 "XAUI",
96 "TXP",
97 "ARM",
98 "IPC",
99 "HOST2",
100 "HOST3",
101 "HOST4",
102 "HOST5",
103 "HOST6",
104 "HOST7",
105 "HOST8",
106 "HOST9",
107 "NETC"
108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown"
116};
6b7c5b94 117
3abcdeda
SP
118static inline bool be_multi_rxq(struct be_adapter *adapter)
119{
120 return (adapter->num_rx_qs > 1);
121}
122
6b7c5b94
SP
123static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
124{
125 struct be_dma_mem *mem = &q->dma_mem;
126 if (mem->va)
127 pci_free_consistent(adapter->pdev, mem->size,
128 mem->va, mem->dma);
129}
130
131static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
132 u16 len, u16 entry_size)
133{
134 struct be_dma_mem *mem = &q->dma_mem;
135
136 memset(q, 0, sizeof(*q));
137 q->len = len;
138 q->entry_size = entry_size;
139 mem->size = len * entry_size;
140 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
141 if (!mem->va)
142 return -1;
143 memset(mem->va, 0, mem->size);
144 return 0;
145}
146
8788fdc2 147static void be_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 148{
8788fdc2 149 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
6b7c5b94
SP
150 u32 reg = ioread32(addr);
151 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 152
cf588477
SP
153 if (adapter->eeh_err)
154 return;
155
5f0b849e 156 if (!enabled && enable)
6b7c5b94 157 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 158 else if (enabled && !enable)
6b7c5b94 159 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 160 else
6b7c5b94 161 return;
5f0b849e 162
6b7c5b94
SP
163 iowrite32(reg, addr);
164}
165
8788fdc2 166static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
167{
168 u32 val = 0;
169 val |= qid & DB_RQ_RING_ID_MASK;
170 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
171
172 wmb();
8788fdc2 173 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
174}
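/* Illustrative note (editor's addition, not in the driver): each
 * doorbell write packs the ring id into the low bits and the
 * posted/popped count above it. Posting 64 receive buffers to an RQ
 * with id 5 would, under these macros, come down to roughly:
 *
 *	u32 val = (5 & DB_RQ_RING_ID_MASK) | (64 << DB_RQ_NUM_POSTED_SHIFT);
 *	iowrite32(val, adapter->db + DB_RQ_OFFSET);
 *
 * The wmb() above ensures the ring entries are visible to the device
 * before the doorbell is rung.
 */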
175
8788fdc2 176static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
177{
178 u32 val = 0;
179 val |= qid & DB_TXULP_RING_ID_MASK;
180 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
181
182 wmb();
8788fdc2 183 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
6b7c5b94
SP
184}
185
8788fdc2 186static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
187 bool arm, bool clear_int, u16 num_popped)
188{
189 u32 val = 0;
190 val |= qid & DB_EQ_RING_ID_MASK;
cf588477
SP
191
192 if (adapter->eeh_err)
193 return;
194
6b7c5b94
SP
195 if (arm)
196 val |= 1 << DB_EQ_REARM_SHIFT;
197 if (clear_int)
198 val |= 1 << DB_EQ_CLR_SHIFT;
199 val |= 1 << DB_EQ_EVNT_SHIFT;
200 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 201 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
202}
203
8788fdc2 204void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
205{
206 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK;
cf588477
SP
208
209 if (adapter->eeh_err)
210 return;
211
6b7c5b94
SP
212 if (arm)
213 val |= 1 << DB_CQ_REARM_SHIFT;
214 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 215 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
216}
217
6b7c5b94
SP
218static int be_mac_addr_set(struct net_device *netdev, void *p)
219{
220 struct be_adapter *adapter = netdev_priv(netdev);
221 struct sockaddr *addr = p;
222 int status = 0;
223
ca9e4988
AK
224 if (!is_valid_ether_addr(addr->sa_data))
225 return -EADDRNOTAVAIL;
226
ba343c77
SB
227 /* MAC addr configuration will be done in hardware for VFs
228 * by their corresponding PFs. Just copy to netdev addr here
229 */
230 if (!be_physfn(adapter))
231 goto netdev_addr;
232
a65027e4
SP
233 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
234 if (status)
235 return status;
6b7c5b94 236
a65027e4
SP
237 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
238 adapter->if_handle, &adapter->pmac_id);
ba343c77 239netdev_addr:
6b7c5b94
SP
240 if (!status)
241 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
242
243 return status;
244}
245
b31c50a7 246void netdev_stats_update(struct be_adapter *adapter)
6b7c5b94 247{
3abcdeda 248 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
6b7c5b94
SP
249 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
250 struct be_port_rxf_stats *port_stats =
251 &rxf_stats->port[adapter->port_num];
78122a52 252 struct net_device_stats *dev_stats = &adapter->netdev->stats;
68110868 253 struct be_erx_stats *erx_stats = &hw_stats->erx;
3abcdeda
SP
254 struct be_rx_obj *rxo;
255 int i;
6b7c5b94 256
3abcdeda
SP
257 memset(dev_stats, 0, sizeof(*dev_stats));
258 for_all_rx_queues(adapter, rxo, i) {
259 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
260 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
261 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
262 /* no space in linux buffers: best possible approximation */
263 dev_stats->rx_dropped +=
264 erx_stats->rx_drops_no_fragments[rxo->q.id];
265 }
266
267 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
268 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
6b7c5b94
SP
269
270 /* bad pkts received */
271 dev_stats->rx_errors = port_stats->rx_crc_errors +
272 port_stats->rx_alignment_symbol_errors +
273 port_stats->rx_in_range_errors +
68110868
SP
274 port_stats->rx_out_range_errors +
275 port_stats->rx_frame_too_long +
276 port_stats->rx_dropped_too_small +
277 port_stats->rx_dropped_too_short +
278 port_stats->rx_dropped_header_too_small +
279 port_stats->rx_dropped_tcp_length +
280 port_stats->rx_dropped_runt +
281 port_stats->rx_tcp_checksum_errs +
282 port_stats->rx_ip_checksum_errs +
283 port_stats->rx_udp_checksum_errs;
284
6b7c5b94
SP
285 /* detailed rx errors */
286 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
68110868
SP
287 port_stats->rx_out_range_errors +
288 port_stats->rx_frame_too_long;
289
6b7c5b94
SP
290 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
291
292 /* frame alignment errors */
293 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
68110868 294
6b7c5b94
SP
295 /* receiver fifo overrun */
296 /* drops_no_pbuf is no per i/f, it's per BE card */
297 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
298 port_stats->rx_input_fifo_overflow +
299 rxf_stats->rx_drops_no_pbuf;
6b7c5b94
SP
300}
301
8788fdc2 302void be_link_status_update(struct be_adapter *adapter, bool link_up)
6b7c5b94 303{
6b7c5b94
SP
304 struct net_device *netdev = adapter->netdev;
305
6b7c5b94 306 /* If link came up or went down */
a8f447bd 307 if (adapter->link_up != link_up) {
0dffc83e 308 adapter->link_speed = -1;
a8f447bd 309 if (link_up) {
6b7c5b94
SP
310 netif_start_queue(netdev);
311 netif_carrier_on(netdev);
312 printk(KERN_INFO "%s: Link up\n", netdev->name);
a8f447bd
SP
313 } else {
314 netif_stop_queue(netdev);
315 netif_carrier_off(netdev);
316 printk(KERN_INFO "%s: Link down\n", netdev->name);
6b7c5b94 317 }
a8f447bd 318 adapter->link_up = link_up;
6b7c5b94 319 }
6b7c5b94
SP
320}
321
322/* Update the EQ delay in BE based on the RX frags consumed / sec */
3abcdeda 323static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94 324{
3abcdeda
SP
325 struct be_eq_obj *rx_eq = &rxo->rx_eq;
326 struct be_rx_stats *stats = &rxo->stats;
4097f663
SP
327 ulong now = jiffies;
328 u32 eqd;
329
330 if (!rx_eq->enable_aic)
331 return;
332
333 /* Wrapped around */
334 if (time_before(now, stats->rx_fps_jiffies)) {
335 stats->rx_fps_jiffies = now;
336 return;
337 }
6b7c5b94
SP
338
339 /* Update once a second */
4097f663 340 if ((now - stats->rx_fps_jiffies) < HZ)
6b7c5b94
SP
341 return;
342
3abcdeda 343 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
4097f663 344 ((now - stats->rx_fps_jiffies) / HZ);
6b7c5b94 345
4097f663 346 stats->rx_fps_jiffies = now;
3abcdeda
SP
347 stats->prev_rx_frags = stats->rx_frags;
348 eqd = stats->rx_fps / 110000;
6b7c5b94
SP
349 eqd = eqd << 3;
350 if (eqd > rx_eq->max_eqd)
351 eqd = rx_eq->max_eqd;
352 if (eqd < rx_eq->min_eqd)
353 eqd = rx_eq->min_eqd;
354 if (eqd < 10)
355 eqd = 0;
356 if (eqd != rx_eq->cur_eqd)
8788fdc2 357 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
6b7c5b94
SP
358
359 rx_eq->cur_eqd = eqd;
360}
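/* Illustrative note (editor's addition, not in the driver): the
 * adaptive EQ delay maps the measured rx frags/sec into an eqd value
 * in steps of 8. For example, with rx_fps = 440000:
 *
 *	eqd = (440000 / 110000) << 3 = 32
 *
 * The result is clamped to [min_eqd, max_eqd], and anything below 10
 * is forced to 0, i.e. no interrupt delay at low traffic rates.
 */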
361
65f71b8b
SH
362static u32 be_calc_rate(u64 bytes, unsigned long ticks)
363{
364 u64 rate = bytes;
365
366 do_div(rate, ticks / HZ);
367 rate <<= 3; /* bytes/sec -> bits/sec */
368 do_div(rate, 1000000ul); /* MB/Sec */
369
370 return rate;
371}
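/* Illustrative note (editor's addition, not in the driver):
 * be_calc_rate() turns a byte delta over a jiffy interval into
 * Mbits/sec. For example, 2,500,000,000 bytes seen over 2*HZ ticks:
 *
 *	rate = 2500000000 / 2;		1,250,000,000 bytes/sec
 *	rate <<= 3;			10,000,000,000 bits/sec
 *	rate /= 1000000;		10000 Mbits/sec (10 Gbps)
 */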
372
4097f663
SP
373static void be_tx_rate_update(struct be_adapter *adapter)
374{
3abcdeda 375 struct be_tx_stats *stats = tx_stats(adapter);
4097f663
SP
376 ulong now = jiffies;
377
378 /* Wrapped around? */
379 if (time_before(now, stats->be_tx_jiffies)) {
380 stats->be_tx_jiffies = now;
381 return;
382 }
383
384 /* Update tx rate once in two seconds */
385 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
65f71b8b
SH
386 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
387 - stats->be_tx_bytes_prev,
388 now - stats->be_tx_jiffies);
4097f663
SP
389 stats->be_tx_jiffies = now;
390 stats->be_tx_bytes_prev = stats->be_tx_bytes;
391 }
392}
393
6b7c5b94 394static void be_tx_stats_update(struct be_adapter *adapter,
91992e44 395 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 396{
3abcdeda 397 struct be_tx_stats *stats = tx_stats(adapter);
6b7c5b94
SP
398 stats->be_tx_reqs++;
399 stats->be_tx_wrbs += wrb_cnt;
400 stats->be_tx_bytes += copied;
91992e44 401 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94
SP
402 if (stopped)
403 stats->be_tx_stops++;
6b7c5b94
SP
404}
405
406/* Determine number of WRB entries needed to xmit data in an skb */
407static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
408{
ebc8d2ab
DM
409 int cnt = (skb->len > skb->data_len);
410
411 cnt += skb_shinfo(skb)->nr_frags;
412
6b7c5b94
SP
413 /* to account for hdr wrb */
414 cnt++;
415 if (cnt & 1) {
416 /* add a dummy to make it an even num */
417 cnt++;
418 *dummy = true;
419 } else
420 *dummy = false;
421 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
422 return cnt;
423}
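/* Illustrative note (editor's addition, not in the driver): the WRB
 * count is one fragment per linear/paged chunk plus the header WRB,
 * padded to an even total. For an skb with linear data and 2 page
 * frags:
 *
 *	cnt = 1 (linear) + 2 (frags) + 1 (hdr) = 4, even, no dummy
 *
 * With 3 page frags cnt would be 5, so a dummy WRB is appended
 * (*dummy = true), presumably a hardware requirement on even counts.
 */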
424
425static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
426{
427 wrb->frag_pa_hi = upper_32_bits(addr);
428 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
429 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
430}
431
432static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
eab6d18d 433 u32 wrb_cnt, u32 len)
6b7c5b94
SP
434{
435 memset(hdr, 0, sizeof(*hdr));
436
437 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
438
49e4b847 439 if (skb_is_gso(skb)) {
6b7c5b94
SP
440 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
441 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
442 hdr, skb_shinfo(skb)->gso_size);
49e4b847
AK
443 if (skb_is_gso_v6(skb))
444 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
6b7c5b94
SP
445 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
446 if (is_tcp_pkt(skb))
447 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
448 else if (is_udp_pkt(skb))
449 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
450 }
451
eab6d18d 452 if (vlan_tx_tag_present(skb)) {
6b7c5b94
SP
453 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
454 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
455 hdr, vlan_tx_tag_get(skb));
456 }
457
458 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
459 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
460 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
461 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
462}
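/* Illustrative note (editor's addition, not in the driver): this is
 * where the commit's vlan change lands. The header WRB is tagged based
 * purely on vlan_tx_tag_present(skb), with no prior check of
 * adapter->vlan_grp; for transmit, the tag carried in the skb is
 * authoritative:
 *
 *	if (vlan_tx_tag_present(skb))
 *		tag = vlan_tx_tag_get(skb);	(written into the hdr WRB)
 */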
463
7101e111
SP
464static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
465 bool unmap_single)
466{
467 dma_addr_t dma;
468
469 be_dws_le_to_cpu(wrb, sizeof(*wrb));
470
471 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 472 if (wrb->frag_len) {
7101e111
SP
473 if (unmap_single)
474 pci_unmap_single(pdev, dma, wrb->frag_len,
475 PCI_DMA_TODEVICE);
476 else
477 pci_unmap_page(pdev, dma, wrb->frag_len,
478 PCI_DMA_TODEVICE);
479 }
480}
6b7c5b94
SP
481
482static int make_tx_wrbs(struct be_adapter *adapter,
483 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
484{
7101e111
SP
485 dma_addr_t busaddr;
486 int i, copied = 0;
6b7c5b94
SP
487 struct pci_dev *pdev = adapter->pdev;
488 struct sk_buff *first_skb = skb;
489 struct be_queue_info *txq = &adapter->tx_obj.q;
490 struct be_eth_wrb *wrb;
491 struct be_eth_hdr_wrb *hdr;
7101e111
SP
492 bool map_single = false;
493 u16 map_head;
6b7c5b94 494
6b7c5b94
SP
495 hdr = queue_head_node(txq);
496 queue_head_inc(txq);
7101e111 497 map_head = txq->head;
6b7c5b94 498
ebc8d2ab 499 if (skb->len > skb->data_len) {
e743d313 500 int len = skb_headlen(skb);
a73b796e
AD
501 busaddr = pci_map_single(pdev, skb->data, len,
502 PCI_DMA_TODEVICE);
7101e111
SP
503 if (pci_dma_mapping_error(pdev, busaddr))
504 goto dma_err;
505 map_single = true;
ebc8d2ab
DM
506 wrb = queue_head_node(txq);
507 wrb_fill(wrb, busaddr, len);
508 be_dws_cpu_to_le(wrb, sizeof(*wrb));
509 queue_head_inc(txq);
510 copied += len;
511 }
6b7c5b94 512
ebc8d2ab
DM
513 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
514 struct skb_frag_struct *frag =
515 &skb_shinfo(skb)->frags[i];
a73b796e
AD
516 busaddr = pci_map_page(pdev, frag->page,
517 frag->page_offset,
518 frag->size, PCI_DMA_TODEVICE);
7101e111
SP
519 if (pci_dma_mapping_error(pdev, busaddr))
520 goto dma_err;
ebc8d2ab
DM
521 wrb = queue_head_node(txq);
522 wrb_fill(wrb, busaddr, frag->size);
523 be_dws_cpu_to_le(wrb, sizeof(*wrb));
524 queue_head_inc(txq);
525 copied += frag->size;
6b7c5b94
SP
526 }
527
528 if (dummy_wrb) {
529 wrb = queue_head_node(txq);
530 wrb_fill(wrb, 0, 0);
531 be_dws_cpu_to_le(wrb, sizeof(*wrb));
532 queue_head_inc(txq);
533 }
534
eab6d18d 535 wrb_fill_hdr(hdr, first_skb, wrb_cnt, copied);
6b7c5b94
SP
536 be_dws_cpu_to_le(hdr, sizeof(*hdr));
537
538 return copied;
7101e111
SP
539dma_err:
540 txq->head = map_head;
541 while (copied) {
542 wrb = queue_head_node(txq);
543 unmap_tx_frag(pdev, wrb, map_single);
544 map_single = false;
545 copied -= wrb->frag_len;
546 queue_head_inc(txq);
547 }
548 return 0;
6b7c5b94
SP
549}
550
61357325 551static netdev_tx_t be_xmit(struct sk_buff *skb,
b31c50a7 552 struct net_device *netdev)
6b7c5b94
SP
553{
554 struct be_adapter *adapter = netdev_priv(netdev);
555 struct be_tx_obj *tx_obj = &adapter->tx_obj;
556 struct be_queue_info *txq = &tx_obj->q;
557 u32 wrb_cnt = 0, copied = 0;
558 u32 start = txq->head;
559 bool dummy_wrb, stopped = false;
560
561 wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
562
563 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
c190e3c8
AK
564 if (copied) {
565 /* record the sent skb in the sent_skb table */
566 BUG_ON(tx_obj->sent_skb_list[start]);
567 tx_obj->sent_skb_list[start] = skb;
568
569 /* Ensure txq has space for the next skb; Else stop the queue
570 * *BEFORE* ringing the tx doorbell, so that we serialize the
571 * tx compls of the current transmit which'll wake up the queue
572 */
7101e111 573 atomic_add(wrb_cnt, &txq->used);
c190e3c8
AK
574 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
575 txq->len) {
576 netif_stop_queue(netdev);
577 stopped = true;
578 }
6b7c5b94 579
c190e3c8 580 be_txq_notify(adapter, txq->id, wrb_cnt);
6b7c5b94 581
91992e44
AK
582 be_tx_stats_update(adapter, wrb_cnt, copied,
583 skb_shinfo(skb)->gso_segs, stopped);
c190e3c8
AK
584 } else {
585 txq->head = start;
586 dev_kfree_skb_any(skb);
6b7c5b94 587 }
6b7c5b94
SP
588 return NETDEV_TX_OK;
589}
590
591static int be_change_mtu(struct net_device *netdev, int new_mtu)
592{
593 struct be_adapter *adapter = netdev_priv(netdev);
594 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
595 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
596 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
597 dev_info(&adapter->pdev->dev,
598 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
599 BE_MIN_MTU,
600 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
601 return -EINVAL;
602 }
603 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
604 netdev->mtu, new_mtu);
605 netdev->mtu = new_mtu;
606 return 0;
607}
608
609/*
82903e4b
AK
610 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
611 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 612 */
1da87b7f 613static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
6b7c5b94 614{
6b7c5b94
SP
615 u16 vtag[BE_NUM_VLANS_SUPPORTED];
616 u16 ntags = 0, i;
82903e4b 617 int status = 0;
1da87b7f
AK
618 u32 if_handle;
619
620 if (vf) {
621 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
622 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
623 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
624 }
6b7c5b94 625
82903e4b 626 if (adapter->vlans_added <= adapter->max_vlans) {
6b7c5b94 627 /* Construct VLAN Table to give to HW */
b738127d 628 for (i = 0; i < VLAN_N_VID; i++) {
6b7c5b94
SP
629 if (adapter->vlan_tag[i]) {
630 vtag[ntags] = cpu_to_le16(i);
631 ntags++;
632 }
633 }
b31c50a7
SP
634 status = be_cmd_vlan_config(adapter, adapter->if_handle,
635 vtag, ntags, 1, 0);
6b7c5b94 636 } else {
b31c50a7
SP
637 status = be_cmd_vlan_config(adapter, adapter->if_handle,
638 NULL, 0, 1, 1);
6b7c5b94 639 }
1da87b7f 640
b31c50a7 641 return status;
6b7c5b94
SP
642}
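/* Illustrative note (editor's addition, not in the driver): for a PF
 * with, say, vlan ids 5 and 100 configured, the loop above builds
 *
 *	vtag[] = { cpu_to_le16(5), cpu_to_le16(100) }, ntags = 2
 *
 * and programs just those ids. Once vlans_added exceeds
 * adapter->max_vlans, the NULL form of be_cmd_vlan_config() is used
 * instead, placing the interface in vlan promiscuous mode.
 */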
643
644static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
645{
646 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 647
6b7c5b94 648 adapter->vlan_grp = grp;
6b7c5b94
SP
649}
650
651static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
652{
653 struct be_adapter *adapter = netdev_priv(netdev);
654
1da87b7f 655 adapter->vlans_added++;
ba343c77
SB
656 if (!be_physfn(adapter))
657 return;
658
6b7c5b94 659 adapter->vlan_tag[vid] = 1;
82903e4b 660 if (adapter->vlans_added <= (adapter->max_vlans + 1))
1da87b7f 661 be_vid_config(adapter, false, 0);
6b7c5b94
SP
662}
663
664static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
665{
666 struct be_adapter *adapter = netdev_priv(netdev);
667
1da87b7f
AK
668 adapter->vlans_added--;
669 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
670
ba343c77
SB
671 if (!be_physfn(adapter))
672 return;
673
6b7c5b94 674 adapter->vlan_tag[vid] = 0;
82903e4b 675 if (adapter->vlans_added <= adapter->max_vlans)
1da87b7f 676 be_vid_config(adapter, false, 0);
6b7c5b94
SP
677}
678
24307eef 679static void be_set_multicast_list(struct net_device *netdev)
6b7c5b94
SP
680{
681 struct be_adapter *adapter = netdev_priv(netdev);
6b7c5b94 682
24307eef 683 if (netdev->flags & IFF_PROMISC) {
8788fdc2 684 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
24307eef
SP
685 adapter->promiscuous = true;
686 goto done;
6b7c5b94
SP
687 }
688
24307eef
SP
689	/* BE was previously in promiscuous mode; disable it */
690 if (adapter->promiscuous) {
691 adapter->promiscuous = false;
8788fdc2 692 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
6b7c5b94
SP
693 }
694
e7b909a6 695 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf
JP
696 if (netdev->flags & IFF_ALLMULTI ||
697 netdev_mc_count(netdev) > BE_MAX_MC) {
0ddf477b 698 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
e7b909a6 699 &adapter->mc_cmd_mem);
24307eef 700 goto done;
6b7c5b94 701 }
6b7c5b94 702
0ddf477b 703 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
f31e50a8 704 &adapter->mc_cmd_mem);
24307eef
SP
705done:
706 return;
6b7c5b94
SP
707}
708
ba343c77
SB
709static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
710{
711 struct be_adapter *adapter = netdev_priv(netdev);
712 int status;
713
714 if (!adapter->sriov_enabled)
715 return -EPERM;
716
717 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
718 return -EINVAL;
719
64600ea5
AK
720 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
721 status = be_cmd_pmac_del(adapter,
722 adapter->vf_cfg[vf].vf_if_handle,
723 adapter->vf_cfg[vf].vf_pmac_id);
ba343c77 724
64600ea5
AK
725 status = be_cmd_pmac_add(adapter, mac,
726 adapter->vf_cfg[vf].vf_if_handle,
727 &adapter->vf_cfg[vf].vf_pmac_id);
728
729 if (status)
ba343c77
SB
730 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
731 mac, vf);
64600ea5
AK
732 else
733 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
734
ba343c77
SB
735 return status;
736}
737
64600ea5
AK
738static int be_get_vf_config(struct net_device *netdev, int vf,
739 struct ifla_vf_info *vi)
740{
741 struct be_adapter *adapter = netdev_priv(netdev);
742
743 if (!adapter->sriov_enabled)
744 return -EPERM;
745
746 if (vf >= num_vfs)
747 return -EINVAL;
748
749 vi->vf = vf;
e1d18735 750 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
1da87b7f 751 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
64600ea5
AK
752 vi->qos = 0;
753 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
754
755 return 0;
756}
757
1da87b7f
AK
758static int be_set_vf_vlan(struct net_device *netdev,
759 int vf, u16 vlan, u8 qos)
760{
761 struct be_adapter *adapter = netdev_priv(netdev);
762 int status = 0;
763
764 if (!adapter->sriov_enabled)
765 return -EPERM;
766
767 if ((vf >= num_vfs) || (vlan > 4095))
768 return -EINVAL;
769
770 if (vlan) {
771 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
772 adapter->vlans_added++;
773 } else {
774 adapter->vf_cfg[vf].vf_vlan_tag = 0;
775 adapter->vlans_added--;
776 }
777
778 status = be_vid_config(adapter, true, vf);
779
780 if (status)
781 dev_info(&adapter->pdev->dev,
782 "VLAN %d config on VF %d failed\n", vlan, vf);
783 return status;
784}
785
e1d18735
AK
786static int be_set_vf_tx_rate(struct net_device *netdev,
787 int vf, int rate)
788{
789 struct be_adapter *adapter = netdev_priv(netdev);
790 int status = 0;
791
792 if (!adapter->sriov_enabled)
793 return -EPERM;
794
795 if ((vf >= num_vfs) || (rate < 0))
796 return -EINVAL;
797
798 if (rate > 10000)
799 rate = 10000;
800
801 adapter->vf_cfg[vf].vf_tx_rate = rate;
802 status = be_cmd_set_qos(adapter, rate / 10, vf);
803
804 if (status)
805 dev_info(&adapter->pdev->dev,
806 "tx rate %d on VF %d failed\n", rate, vf);
807 return status;
808}
809
3abcdeda 810static void be_rx_rate_update(struct be_rx_obj *rxo)
6b7c5b94 811{
3abcdeda 812 struct be_rx_stats *stats = &rxo->stats;
4097f663 813 ulong now = jiffies;
6b7c5b94 814
4097f663 815 /* Wrapped around */
3abcdeda
SP
816 if (time_before(now, stats->rx_jiffies)) {
817 stats->rx_jiffies = now;
4097f663
SP
818 return;
819 }
6b7c5b94
SP
820
821 /* Update the rate once in two seconds */
3abcdeda 822 if ((now - stats->rx_jiffies) < 2 * HZ)
6b7c5b94
SP
823 return;
824
3abcdeda
SP
825 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
826 now - stats->rx_jiffies);
827 stats->rx_jiffies = now;
828 stats->rx_bytes_prev = stats->rx_bytes;
6b7c5b94
SP
829}
830
3abcdeda 831static void be_rx_stats_update(struct be_rx_obj *rxo,
1ef78abe 832 u32 pktsize, u16 numfrags, u8 pkt_type)
4097f663 833{
3abcdeda 834 struct be_rx_stats *stats = &rxo->stats;
1ef78abe 835
3abcdeda
SP
836 stats->rx_compl++;
837 stats->rx_frags += numfrags;
838 stats->rx_bytes += pktsize;
839 stats->rx_pkts++;
1ef78abe 840 if (pkt_type == BE_MULTICAST_PACKET)
3abcdeda 841 stats->rx_mcast_pkts++;
4097f663
SP
842}
843
728a9972
AK
844static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
845{
846 u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
847
848 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
849 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
850 ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
851 if (ip_version) {
852 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
853 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
854 }
855 ipv6_chk = (ip_version && (tcpf || udpf));
856
857 return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
858}
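/* Illustrative note (editor's addition, not in the driver):
 * do_pkt_csum() returns true when the stack must verify the checksum
 * itself. CHECKSUM_UNNECESSARY is claimed only when rx checksum
 * offload is enabled (cso) and the completion reports valid IP and L4
 * checksums for a frame the hardware recognized as TCP or UDP.
 */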
859
6b7c5b94 860static struct be_rx_page_info *
3abcdeda
SP
861get_rx_page_info(struct be_adapter *adapter,
862 struct be_rx_obj *rxo,
863 u16 frag_idx)
6b7c5b94
SP
864{
865 struct be_rx_page_info *rx_page_info;
3abcdeda 866 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 867
3abcdeda 868 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
869 BUG_ON(!rx_page_info->page);
870
205859a2 871 if (rx_page_info->last_page_user) {
fac6da5b 872 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
6b7c5b94 873 adapter->big_page_size, PCI_DMA_FROMDEVICE);
205859a2
AK
874 rx_page_info->last_page_user = false;
875 }
6b7c5b94
SP
876
877 atomic_dec(&rxq->used);
878 return rx_page_info;
879}
880
881/* Throw away the data in the Rx completion */
882static void be_rx_compl_discard(struct be_adapter *adapter,
3abcdeda
SP
883 struct be_rx_obj *rxo,
884 struct be_eth_rx_compl *rxcp)
6b7c5b94 885{
3abcdeda 886 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
887 struct be_rx_page_info *page_info;
888 u16 rxq_idx, i, num_rcvd;
889
890 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
891 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
892
893 for (i = 0; i < num_rcvd; i++) {
3abcdeda 894 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
6b7c5b94
SP
895 put_page(page_info->page);
896 memset(page_info, 0, sizeof(*page_info));
897 index_inc(&rxq_idx, rxq->len);
898 }
899}
900
901/*
902 * skb_fill_rx_data forms a complete skb for an ether frame
903 * indicated by rxcp.
904 */
3abcdeda 905static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
89420424
SP
906 struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
907 u16 num_rcvd)
6b7c5b94 908{
3abcdeda 909 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 910 struct be_rx_page_info *page_info;
89420424 911 u16 rxq_idx, i, j;
fa77406a 912 u32 pktsize, hdr_len, curr_frag_len, size;
6b7c5b94 913 u8 *start;
1ef78abe 914 u8 pkt_type;
6b7c5b94
SP
915
916 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
917 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1ef78abe 918 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
6b7c5b94 919
3abcdeda 920 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
6b7c5b94
SP
921
922 start = page_address(page_info->page) + page_info->page_offset;
923 prefetch(start);
924
925 /* Copy data in the first descriptor of this completion */
926 curr_frag_len = min(pktsize, rx_frag_size);
927
928 /* Copy the header portion into skb_data */
929 hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
930 memcpy(skb->data, start, hdr_len);
931 skb->len = curr_frag_len;
932 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
933 /* Complete packet has now been moved to data */
934 put_page(page_info->page);
935 skb->data_len = 0;
936 skb->tail += curr_frag_len;
937 } else {
938 skb_shinfo(skb)->nr_frags = 1;
939 skb_shinfo(skb)->frags[0].page = page_info->page;
940 skb_shinfo(skb)->frags[0].page_offset =
941 page_info->page_offset + hdr_len;
942 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
943 skb->data_len = curr_frag_len - hdr_len;
944 skb->tail += hdr_len;
945 }
205859a2 946 page_info->page = NULL;
6b7c5b94
SP
947
948 if (pktsize <= rx_frag_size) {
949 BUG_ON(num_rcvd != 1);
76fbb429 950 goto done;
6b7c5b94
SP
951 }
952
953 /* More frags present for this completion */
fa77406a 954 size = pktsize;
bd46cb6c 955 for (i = 1, j = 0; i < num_rcvd; i++) {
fa77406a 956 size -= curr_frag_len;
6b7c5b94 957 index_inc(&rxq_idx, rxq->len);
3abcdeda 958 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
6b7c5b94 959
fa77406a 960 curr_frag_len = min(size, rx_frag_size);
6b7c5b94 961
bd46cb6c
AK
962 /* Coalesce all frags from the same physical page in one slot */
963 if (page_info->page_offset == 0) {
964 /* Fresh page */
965 j++;
966 skb_shinfo(skb)->frags[j].page = page_info->page;
967 skb_shinfo(skb)->frags[j].page_offset =
968 page_info->page_offset;
969 skb_shinfo(skb)->frags[j].size = 0;
970 skb_shinfo(skb)->nr_frags++;
971 } else {
972 put_page(page_info->page);
973 }
974
975 skb_shinfo(skb)->frags[j].size += curr_frag_len;
6b7c5b94
SP
976 skb->len += curr_frag_len;
977 skb->data_len += curr_frag_len;
6b7c5b94 978
205859a2 979 page_info->page = NULL;
6b7c5b94 980 }
bd46cb6c 981 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 982
76fbb429 983done:
3abcdeda 984 be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
6b7c5b94
SP
985}
986
5be93b9a 987/* Process the RX completion indicated by rxcp when GRO is disabled */
6b7c5b94 988static void be_rx_compl_process(struct be_adapter *adapter,
3abcdeda 989 struct be_rx_obj *rxo,
6b7c5b94
SP
990 struct be_eth_rx_compl *rxcp)
991{
992 struct sk_buff *skb;
dcb9b564 993 u32 vlanf, vid;
89420424 994 u16 num_rcvd;
dcb9b564 995 u8 vtm;
6b7c5b94 996
89420424
SP
997 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
998 /* Is it a flush compl that has no data */
999 if (unlikely(num_rcvd == 0))
1000 return;
1001
89d71a66 1002 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
a058a632 1003 if (unlikely(!skb)) {
6b7c5b94
SP
1004 if (net_ratelimit())
1005 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
3abcdeda 1006 be_rx_compl_discard(adapter, rxo, rxcp);
6b7c5b94
SP
1007 return;
1008 }
1009
3abcdeda 1010 skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
6b7c5b94 1011
728a9972 1012 if (do_pkt_csum(rxcp, adapter->rx_csum))
bc8acf2c 1013 skb_checksum_none_assert(skb);
728a9972
AK
1014 else
1015 skb->ip_summed = CHECKSUM_UNNECESSARY;
6b7c5b94
SP
1016
1017 skb->truesize = skb->len + sizeof(struct sk_buff);
1018 skb->protocol = eth_type_trans(skb, adapter->netdev);
6b7c5b94 1019
a058a632
SP
1020 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1021 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1022
1023 /* vlanf could be wrongly set in some cards.
1024 * ignore if vtm is not set */
3486be29 1025 if ((adapter->function_mode & 0x400) && !vtm)
a058a632
SP
1026 vlanf = 0;
1027
1028 if (unlikely(vlanf)) {
82903e4b 1029 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
6b7c5b94
SP
1030 kfree_skb(skb);
1031 return;
1032 }
1033 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
9cae9e4f 1034 vid = swab16(vid);
6b7c5b94
SP
1035 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1036 } else {
1037 netif_receive_skb(skb);
1038 }
6b7c5b94
SP
1039}
1040
5be93b9a
AK
1041/* Process the RX completion indicated by rxcp when GRO is enabled */
1042static void be_rx_compl_process_gro(struct be_adapter *adapter,
3abcdeda
SP
1043 struct be_rx_obj *rxo,
1044 struct be_eth_rx_compl *rxcp)
6b7c5b94
SP
1045{
1046 struct be_rx_page_info *page_info;
5be93b9a 1047 struct sk_buff *skb = NULL;
3abcdeda
SP
1048 struct be_queue_info *rxq = &rxo->q;
1049 struct be_eq_obj *eq_obj = &rxo->rx_eq;
6b7c5b94 1050 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
bd46cb6c 1051 u16 i, rxq_idx = 0, vid, j;
dcb9b564 1052 u8 vtm;
1ef78abe 1053 u8 pkt_type;
6b7c5b94
SP
1054
1055 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
89420424
SP
1056 /* Is it a flush compl that has no data */
1057 if (unlikely(num_rcvd == 0))
1058 return;
1059
6b7c5b94
SP
1060 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1061 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1062 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
dcb9b564 1063 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1ef78abe 1064 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
dcb9b564
AK
1065
1066 /* vlanf could be wrongly set in some cards.
1067 * ignore if vtm is not set */
3486be29 1068 if ((adapter->function_mode & 0x400) && !vtm)
dcb9b564 1069 vlanf = 0;
6b7c5b94 1070
5be93b9a
AK
1071 skb = napi_get_frags(&eq_obj->napi);
1072 if (!skb) {
3abcdeda 1073 be_rx_compl_discard(adapter, rxo, rxcp);
5be93b9a
AK
1074 return;
1075 }
1076
6b7c5b94 1077 remaining = pkt_size;
bd46cb6c 1078 for (i = 0, j = -1; i < num_rcvd; i++) {
3abcdeda 1079 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
6b7c5b94
SP
1080
1081 curr_frag_len = min(remaining, rx_frag_size);
1082
bd46cb6c
AK
1083 /* Coalesce all frags from the same physical page in one slot */
1084 if (i == 0 || page_info->page_offset == 0) {
1085 /* First frag or Fresh page */
1086 j++;
5be93b9a
AK
1087 skb_shinfo(skb)->frags[j].page = page_info->page;
1088 skb_shinfo(skb)->frags[j].page_offset =
1089 page_info->page_offset;
1090 skb_shinfo(skb)->frags[j].size = 0;
bd46cb6c
AK
1091 } else {
1092 put_page(page_info->page);
1093 }
5be93b9a 1094 skb_shinfo(skb)->frags[j].size += curr_frag_len;
6b7c5b94 1095
bd46cb6c 1096 remaining -= curr_frag_len;
6b7c5b94 1097 index_inc(&rxq_idx, rxq->len);
6b7c5b94
SP
1098 memset(page_info, 0, sizeof(*page_info));
1099 }
bd46cb6c 1100 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1101
5be93b9a
AK
1102 skb_shinfo(skb)->nr_frags = j + 1;
1103 skb->len = pkt_size;
1104 skb->data_len = pkt_size;
1105 skb->truesize += pkt_size;
1106 skb->ip_summed = CHECKSUM_UNNECESSARY;
1107
6b7c5b94 1108 if (likely(!vlanf)) {
5be93b9a 1109 napi_gro_frags(&eq_obj->napi);
6b7c5b94
SP
1110 } else {
1111 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
9cae9e4f 1112 vid = swab16(vid);
6b7c5b94 1113
82903e4b 1114 if (!adapter->vlan_grp || adapter->vlans_added == 0)
6b7c5b94
SP
1115 return;
1116
5be93b9a 1117 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
6b7c5b94
SP
1118 }
1119
3abcdeda 1120 be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
6b7c5b94
SP
1121}
1122
3abcdeda 1123static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
6b7c5b94 1124{
3abcdeda 1125 struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
6b7c5b94
SP
1126
1127 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1128 return NULL;
1129
f3eb62d2 1130 rmb();
6b7c5b94
SP
1131 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1132
3abcdeda 1133 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1134 return rxcp;
1135}
1136
a7a0ef31
SP
1137/* To reset the valid bit, we need to reset the whole word as
1138 * when walking the queue the valid entries are little-endian
1139 * and invalid entries are host endian
1140 */
1141static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1142{
1143 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1144}
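/* Illustrative note (editor's addition, not in the driver):
 * completions are consumed by polling the 'valid' bit rather than an
 * index register. be_rx_compl_get() returns NULL once it sees an entry
 * whose valid word is 0; the rmb() keeps the rest of the entry from
 * being read before the valid bit, and be_rx_compl_reset() zeroes the
 * word so the slot reads as empty when the ring wraps around again.
 */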
1145
6b7c5b94
SP
1146static inline struct page *be_alloc_pages(u32 size)
1147{
1148 gfp_t alloc_flags = GFP_ATOMIC;
1149 u32 order = get_order(size);
1150 if (order > 0)
1151 alloc_flags |= __GFP_COMP;
1152 return alloc_pages(alloc_flags, order);
1153}
1154
1155/*
1156 * Allocate a page, split it into fragments of size rx_frag_size and post as
1157 * receive buffers to BE
1158 */
3abcdeda 1159static void be_post_rx_frags(struct be_rx_obj *rxo)
6b7c5b94 1160{
3abcdeda
SP
1161 struct be_adapter *adapter = rxo->adapter;
1162 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
26d92f92 1163 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1164 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1165 struct page *pagep = NULL;
1166 struct be_eth_rx_d *rxd;
1167 u64 page_dmaaddr = 0, frag_dmaaddr;
1168 u32 posted, page_offset = 0;
1169
3abcdeda 1170 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1171 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1172 if (!pagep) {
1173 pagep = be_alloc_pages(adapter->big_page_size);
1174 if (unlikely(!pagep)) {
3abcdeda 1175 rxo->stats.rx_post_fail++;
6b7c5b94
SP
1176 break;
1177 }
1178 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
1179 adapter->big_page_size,
1180 PCI_DMA_FROMDEVICE);
1181 page_info->page_offset = 0;
1182 } else {
1183 get_page(pagep);
1184 page_info->page_offset = page_offset + rx_frag_size;
1185 }
1186 page_offset = page_info->page_offset;
1187 page_info->page = pagep;
fac6da5b 1188 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1189 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1190
1191 rxd = queue_head_node(rxq);
1192 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1193 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1194
1195 /* Any space left in the current big page for another frag? */
1196 if ((page_offset + rx_frag_size + rx_frag_size) >
1197 adapter->big_page_size) {
1198 pagep = NULL;
1199 page_info->last_page_user = true;
1200 }
26d92f92
SP
1201
1202 prev_page_info = page_info;
1203 queue_head_inc(rxq);
6b7c5b94
SP
1204 page_info = &page_info_tbl[rxq->head];
1205 }
1206 if (pagep)
26d92f92 1207 prev_page_info->last_page_user = true;
6b7c5b94
SP
1208
1209 if (posted) {
6b7c5b94 1210 atomic_add(posted, &rxq->used);
8788fdc2 1211 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1212 } else if (atomic_read(&rxq->used) == 0) {
1213 /* Let be_worker replenish when memory is available */
3abcdeda 1214 rxo->rx_post_starved = true;
6b7c5b94 1215 }
6b7c5b94
SP
1216}
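/* Illustrative note (editor's addition, not in the driver): with the
 * default rx_frag_size of 2048 and 4K pages, big_page_size works out
 * to 4096, so each page is carved into two receive fragments at
 * offsets 0 and 2048. get_page() takes an extra reference for the
 * second fragment, and last_page_user marks the fragment whose unmap
 * should tear down the page's DMA mapping.
 */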
1217
5fb379ee 1218static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1219{
6b7c5b94
SP
1220 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1221
1222 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1223 return NULL;
1224
f3eb62d2 1225 rmb();
6b7c5b94
SP
1226 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1227
1228 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1229
1230 queue_tail_inc(tx_cq);
1231 return txcp;
1232}
1233
1234static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1235{
1236 struct be_queue_info *txq = &adapter->tx_obj.q;
a73b796e 1237 struct be_eth_wrb *wrb;
6b7c5b94
SP
1238 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1239 struct sk_buff *sent_skb;
ec43b1a6
SP
1240 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1241 bool unmap_skb_hdr = true;
6b7c5b94 1242
ec43b1a6 1243 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1244 BUG_ON(!sent_skb);
ec43b1a6
SP
1245 sent_skbs[txq->tail] = NULL;
1246
1247 /* skip header wrb */
a73b796e 1248 queue_tail_inc(txq);
6b7c5b94 1249
ec43b1a6 1250 do {
6b7c5b94 1251 cur_index = txq->tail;
a73b796e 1252 wrb = queue_tail_node(txq);
ec43b1a6 1253 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
e743d313 1254 skb_headlen(sent_skb)));
ec43b1a6
SP
1255 unmap_skb_hdr = false;
1256
6b7c5b94
SP
1257 num_wrbs++;
1258 queue_tail_inc(txq);
ec43b1a6 1259 } while (cur_index != last_index);
6b7c5b94
SP
1260
1261 atomic_sub(num_wrbs, &txq->used);
a73b796e 1262
6b7c5b94
SP
1263 kfree_skb(sent_skb);
1264}
1265
859b1e4e
SP
1266static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1267{
1268 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1269
1270 if (!eqe->evt)
1271 return NULL;
1272
f3eb62d2 1273 rmb();
859b1e4e
SP
1274 eqe->evt = le32_to_cpu(eqe->evt);
1275 queue_tail_inc(&eq_obj->q);
1276 return eqe;
1277}
1278
1279static int event_handle(struct be_adapter *adapter,
1280 struct be_eq_obj *eq_obj)
1281{
1282 struct be_eq_entry *eqe;
1283 u16 num = 0;
1284
1285 while ((eqe = event_get(eq_obj)) != NULL) {
1286 eqe->evt = 0;
1287 num++;
1288 }
1289
1290 /* Deal with any spurious interrupts that come
1291 * without events
1292 */
1293 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1294 if (num)
1295 napi_schedule(&eq_obj->napi);
1296
1297 return num;
1298}
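/* Illustrative note (editor's addition, not in the driver):
 * event_handle() drains the EQ, re-arms it via be_eq_notify(..., arm,
 * clear_int, num) so the popped entries are credited back, and only
 * then schedules NAPI. Notifying even when num == 0 is deliberate: it
 * acknowledges spurious interrupts that arrive without events, per the
 * comment above.
 */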
1299
1300/* Just read and notify events without processing them.
1301 * Used at the time of destroying event queues */
1302static void be_eq_clean(struct be_adapter *adapter,
1303 struct be_eq_obj *eq_obj)
1304{
1305 struct be_eq_entry *eqe;
1306 u16 num = 0;
1307
1308 while ((eqe = event_get(eq_obj)) != NULL) {
1309 eqe->evt = 0;
1310 num++;
1311 }
1312
1313 if (num)
1314 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1315}
1316
3abcdeda 1317static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
6b7c5b94
SP
1318{
1319 struct be_rx_page_info *page_info;
3abcdeda
SP
1320 struct be_queue_info *rxq = &rxo->q;
1321 struct be_queue_info *rx_cq = &rxo->cq;
6b7c5b94
SP
1322 struct be_eth_rx_compl *rxcp;
1323 u16 tail;
1324
1325 /* First cleanup pending rx completions */
3abcdeda
SP
1326 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1327 be_rx_compl_discard(adapter, rxo, rxcp);
a7a0ef31 1328 be_rx_compl_reset(rxcp);
8788fdc2 1329 be_cq_notify(adapter, rx_cq->id, true, 1);
6b7c5b94
SP
1330 }
1331
1332	/* Then free posted rx buffers that were not used */
1333 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1334 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
3abcdeda 1335 page_info = get_rx_page_info(adapter, rxo, tail);
6b7c5b94
SP
1336 put_page(page_info->page);
1337 memset(page_info, 0, sizeof(*page_info));
1338 }
1339 BUG_ON(atomic_read(&rxq->used));
1340}
1341
a8e9179a 1342static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1343{
a8e9179a 1344 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
6b7c5b94 1345 struct be_queue_info *txq = &adapter->tx_obj.q;
a8e9179a
SP
1346 struct be_eth_tx_compl *txcp;
1347 u16 end_idx, cmpl = 0, timeo = 0;
b03388d6
SP
1348 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1349 struct sk_buff *sent_skb;
1350 bool dummy_wrb;
a8e9179a
SP
1351
1352 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1353 do {
1354 while ((txcp = be_tx_compl_get(tx_cq))) {
1355 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1356 wrb_index, txcp);
1357 be_tx_compl_process(adapter, end_idx);
1358 cmpl++;
1359 }
1360 if (cmpl) {
1361 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1362 cmpl = 0;
1363 }
1364
1365 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1366 break;
1367
1368 mdelay(1);
1369 } while (true);
1370
1371 if (atomic_read(&txq->used))
1372 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1373 atomic_read(&txq->used));
b03388d6
SP
1374
1375 /* free posted tx for which compls will never arrive */
1376 while (atomic_read(&txq->used)) {
1377 sent_skb = sent_skbs[txq->tail];
1378 end_idx = txq->tail;
1379 index_adv(&end_idx,
1380 wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
1381 be_tx_compl_process(adapter, end_idx);
1382 }
6b7c5b94
SP
1383}
1384
5fb379ee
SP
1385static void be_mcc_queues_destroy(struct be_adapter *adapter)
1386{
1387 struct be_queue_info *q;
5fb379ee 1388
8788fdc2 1389 q = &adapter->mcc_obj.q;
5fb379ee 1390 if (q->created)
8788fdc2 1391 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1392 be_queue_free(adapter, q);
1393
8788fdc2 1394 q = &adapter->mcc_obj.cq;
5fb379ee 1395 if (q->created)
8788fdc2 1396 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1397 be_queue_free(adapter, q);
1398}
1399
1400/* Must be called only after TX qs are created as MCC shares TX EQ */
1401static int be_mcc_queues_create(struct be_adapter *adapter)
1402{
1403 struct be_queue_info *q, *cq;
5fb379ee
SP
1404
1405 /* Alloc MCC compl queue */
8788fdc2 1406 cq = &adapter->mcc_obj.cq;
5fb379ee 1407 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1408 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1409 goto err;
1410
1411 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1412 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
5fb379ee
SP
1413 goto mcc_cq_free;
1414
1415 /* Alloc MCC queue */
8788fdc2 1416 q = &adapter->mcc_obj.q;
5fb379ee
SP
1417 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1418 goto mcc_cq_destroy;
1419
1420 /* Ask BE to create MCC queue */
8788fdc2 1421 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1422 goto mcc_q_free;
1423
1424 return 0;
1425
1426mcc_q_free:
1427 be_queue_free(adapter, q);
1428mcc_cq_destroy:
8788fdc2 1429 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1430mcc_cq_free:
1431 be_queue_free(adapter, cq);
1432err:
1433 return -1;
1434}
1435
6b7c5b94
SP
1436static void be_tx_queues_destroy(struct be_adapter *adapter)
1437{
1438 struct be_queue_info *q;
1439
1440 q = &adapter->tx_obj.q;
a8e9179a 1441 if (q->created)
8788fdc2 1442 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
6b7c5b94
SP
1443 be_queue_free(adapter, q);
1444
1445 q = &adapter->tx_obj.cq;
1446 if (q->created)
8788fdc2 1447 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
6b7c5b94
SP
1448 be_queue_free(adapter, q);
1449
859b1e4e
SP
1450 /* Clear any residual events */
1451 be_eq_clean(adapter, &adapter->tx_eq);
1452
6b7c5b94
SP
1453 q = &adapter->tx_eq.q;
1454 if (q->created)
8788fdc2 1455 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
6b7c5b94
SP
1456 be_queue_free(adapter, q);
1457}
1458
1459static int be_tx_queues_create(struct be_adapter *adapter)
1460{
1461 struct be_queue_info *eq, *q, *cq;
1462
1463 adapter->tx_eq.max_eqd = 0;
1464 adapter->tx_eq.min_eqd = 0;
1465 adapter->tx_eq.cur_eqd = 96;
1466 adapter->tx_eq.enable_aic = false;
1467 /* Alloc Tx Event queue */
1468 eq = &adapter->tx_eq.q;
1469 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1470 return -1;
1471
1472 /* Ask BE to create Tx Event queue */
8788fdc2 1473 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
6b7c5b94 1474 goto tx_eq_free;
ba343c77
SB
1475 adapter->base_eq_id = adapter->tx_eq.q.id;
1476
6b7c5b94
SP
1477 /* Alloc TX eth compl queue */
1478 cq = &adapter->tx_obj.cq;
1479 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1480 sizeof(struct be_eth_tx_compl)))
1481 goto tx_eq_destroy;
1482
1483 /* Ask BE to create Tx eth compl queue */
8788fdc2 1484 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
6b7c5b94
SP
1485 goto tx_cq_free;
1486
1487 /* Alloc TX eth queue */
1488 q = &adapter->tx_obj.q;
1489 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1490 goto tx_cq_destroy;
1491
1492 /* Ask BE to create Tx eth queue */
8788fdc2 1493 if (be_cmd_txq_create(adapter, q, cq))
6b7c5b94
SP
1494 goto tx_q_free;
1495 return 0;
1496
1497tx_q_free:
1498 be_queue_free(adapter, q);
1499tx_cq_destroy:
8788fdc2 1500 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
6b7c5b94
SP
1501tx_cq_free:
1502 be_queue_free(adapter, cq);
1503tx_eq_destroy:
8788fdc2 1504 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
6b7c5b94
SP
1505tx_eq_free:
1506 be_queue_free(adapter, eq);
1507 return -1;
1508}
1509
1510static void be_rx_queues_destroy(struct be_adapter *adapter)
1511{
1512 struct be_queue_info *q;
3abcdeda
SP
1513 struct be_rx_obj *rxo;
1514 int i;
1515
1516 for_all_rx_queues(adapter, rxo, i) {
1517 q = &rxo->q;
1518 if (q->created) {
1519 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1520 /* After the rxq is invalidated, wait for a grace time
1521 * of 1ms for all dma to end and the flush compl to
1522 * arrive
1523 */
1524 mdelay(1);
1525 be_rx_q_clean(adapter, rxo);
1526 }
1527 be_queue_free(adapter, q);
1528
1529 q = &rxo->cq;
1530 if (q->created)
1531 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1532 be_queue_free(adapter, q);
1533
1534 /* Clear any residual events */
1535 q = &rxo->rx_eq.q;
1536 if (q->created) {
1537 be_eq_clean(adapter, &rxo->rx_eq);
1538 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1539 }
1540 be_queue_free(adapter, q);
6b7c5b94 1541 }
6b7c5b94
SP
1542}
1543
1544static int be_rx_queues_create(struct be_adapter *adapter)
1545{
1546 struct be_queue_info *eq, *q, *cq;
3abcdeda
SP
1547 struct be_rx_obj *rxo;
1548 int rc, i;
6b7c5b94 1549
6b7c5b94 1550 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
1551 for_all_rx_queues(adapter, rxo, i) {
1552 rxo->adapter = adapter;
1553 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1554 rxo->rx_eq.enable_aic = true;
1555
1556 /* EQ */
1557 eq = &rxo->rx_eq.q;
1558 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1559 sizeof(struct be_eq_entry));
1560 if (rc)
1561 goto err;
1562
1563 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1564 if (rc)
1565 goto err;
1566
1567 /* CQ */
1568 cq = &rxo->cq;
1569 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1570 sizeof(struct be_eth_rx_compl));
1571 if (rc)
1572 goto err;
1573
1574 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1575 if (rc)
1576 goto err;
1577
1578 /* Rx Q */
1579 q = &rxo->q;
1580 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1581 sizeof(struct be_eth_rx_d));
1582 if (rc)
1583 goto err;
1584
1585 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1586 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1587 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1588 if (rc)
1589 goto err;
1590 }
1591
1592 if (be_multi_rxq(adapter)) {
1593 u8 rsstable[MAX_RSS_QS];
1594
1595 for_all_rss_queues(adapter, rxo, i)
1596 rsstable[i] = rxo->rss_id;
1597
1598 rc = be_cmd_rss_config(adapter, rsstable,
1599 adapter->num_rx_qs - 1);
1600 if (rc)
1601 goto err;
1602 }
6b7c5b94
SP
1603
1604 return 0;
3abcdeda
SP
1605err:
1606 be_rx_queues_destroy(adapter);
1607 return -1;
6b7c5b94 1608}
6b7c5b94 1609
b628bde2
SP
1610/* There are 8 evt ids per func. Returns the evt id's bit number */
1611static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1612{
ba343c77 1613 return eq_id - adapter->base_eq_id;
b628bde2
SP
1614}
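/* Illustrative note (editor's addition, not in the driver): this
 * relies on EQ ids being handed out contiguously from base_eq_id (the
 * TX EQ, created first). In be_intx() below, the TX EQ therefore maps
 * to bit 0 of the ISR and each RX EQ to the following bits:
 *
 *	isr & (1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id))
 */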
1615
6b7c5b94
SP
1616static irqreturn_t be_intx(int irq, void *dev)
1617{
1618 struct be_adapter *adapter = dev;
3abcdeda
SP
1619 struct be_rx_obj *rxo;
1620 int isr, i;
6b7c5b94 1621
8788fdc2 1622 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
55bdeed9 1623			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
c001c213 1624 if (!isr)
8788fdc2 1625 return IRQ_NONE;
6b7c5b94 1626
3abcdeda
SP
1627 if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
1628 event_handle(adapter, &adapter->tx_eq);
1629
1630 for_all_rx_queues(adapter, rxo, i) {
1631 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
1632 event_handle(adapter, &rxo->rx_eq);
1633 }
c001c213 1634
8788fdc2 1635 return IRQ_HANDLED;
6b7c5b94
SP
1636}
1637
1638static irqreturn_t be_msix_rx(int irq, void *dev)
1639{
3abcdeda
SP
1640 struct be_rx_obj *rxo = dev;
1641 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1642
3abcdeda 1643 event_handle(adapter, &rxo->rx_eq);
6b7c5b94
SP
1644
1645 return IRQ_HANDLED;
1646}
1647
5fb379ee 1648static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
6b7c5b94
SP
1649{
1650 struct be_adapter *adapter = dev;
1651
8788fdc2 1652 event_handle(adapter, &adapter->tx_eq);
6b7c5b94
SP
1653
1654 return IRQ_HANDLED;
1655}
1656
3abcdeda 1657static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
6b7c5b94
SP
1658 struct be_eth_rx_compl *rxcp)
1659{
1660 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1661 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1662
1663 if (err)
3abcdeda 1664 rxo->stats.rxcp_err++;
6b7c5b94 1665
5be93b9a 1666 return (tcp_frame && !err) ? true : false;
6b7c5b94
SP
1667}
1668
1669int be_poll_rx(struct napi_struct *napi, int budget)
1670{
1671 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
3abcdeda
SP
1672 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1673 struct be_adapter *adapter = rxo->adapter;
1674 struct be_queue_info *rx_cq = &rxo->cq;
6b7c5b94
SP
1675 struct be_eth_rx_compl *rxcp;
1676 u32 work_done;
1677
3abcdeda 1678 rxo->stats.rx_polls++;
6b7c5b94 1679 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 1680 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
1681 if (!rxcp)
1682 break;
1683
3abcdeda
SP
1684 if (do_gro(adapter, rxo, rxcp))
1685 be_rx_compl_process_gro(adapter, rxo, rxcp);
6b7c5b94 1686 else
3abcdeda 1687 be_rx_compl_process(adapter, rxo, rxcp);
a7a0ef31
SP
1688
1689 be_rx_compl_reset(rxcp);
6b7c5b94
SP
1690 }
1691
6b7c5b94 1692 /* Refill the queue */
3abcdeda
SP
1693 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1694 be_post_rx_frags(rxo);
6b7c5b94
SP
1695
1696 /* All consumed */
1697 if (work_done < budget) {
1698 napi_complete(napi);
8788fdc2 1699 be_cq_notify(adapter, rx_cq->id, true, work_done);
6b7c5b94
SP
1700 } else {
1701 /* More to be consumed; continue with interrupts disabled */
8788fdc2 1702 be_cq_notify(adapter, rx_cq->id, false, work_done);
6b7c5b94
SP
1703 }
1704 return work_done;
1705}
1706
f31e50a8
SP
1707/* As TX and MCC share the same EQ check for both TX and MCC completions.
1708 * For TX/MCC we don't honour budget; consume everything
1709 */
1710static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
6b7c5b94 1711{
f31e50a8
SP
1712 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1713 struct be_adapter *adapter =
1714 container_of(tx_eq, struct be_adapter, tx_eq);
5fb379ee
SP
1715 struct be_queue_info *txq = &adapter->tx_obj.q;
1716 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
6b7c5b94 1717 struct be_eth_tx_compl *txcp;
f31e50a8 1718 int tx_compl = 0, mcc_compl, status = 0;
6b7c5b94
SP
1719 u16 end_idx;
1720
5fb379ee 1721 while ((txcp = be_tx_compl_get(tx_cq))) {
6b7c5b94 1722 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
f31e50a8 1723 wrb_index, txcp);
6b7c5b94 1724 be_tx_compl_process(adapter, end_idx);
f31e50a8 1725 tx_compl++;
6b7c5b94
SP
1726 }
1727
f31e50a8
SP
1728 mcc_compl = be_process_mcc(adapter, &status);
1729
1730 napi_complete(napi);
1731
1732 if (mcc_compl) {
1733 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1734 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1735 }
1736
1737 if (tx_compl) {
1738 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
5fb379ee
SP
1739
1740 /* As Tx wrbs have been freed up, wake up netdev queue if
1741 * it was stopped due to lack of tx wrbs.
1742 */
1743 if (netif_queue_stopped(adapter->netdev) &&
6b7c5b94 1744 atomic_read(&txq->used) < txq->len / 2) {
5fb379ee
SP
1745 netif_wake_queue(adapter->netdev);
1746 }
1747
3abcdeda
SP
1748 tx_stats(adapter)->be_tx_events++;
1749 tx_stats(adapter)->be_tx_compl += tx_compl;
6b7c5b94 1750 }
6b7c5b94
SP
1751
1752 return 1;
1753}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
			PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
			PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
			PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
			PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
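
/*
 * A minimal, hypothetical sketch of the decode loop be_detect_dump_ue()
 * uses: shift the masked status word right one bit at a time and name
 * each set bit.  The four-entry table here is a made-up example; the
 * driver's real tables cover all 32 bits of each register.
 */
static const char *demo_ue_names[] = { "CEV", "CTX", "DBUF", "ERX" };

static void demo_dump_ue(u32 status)
{
	u32 i;

	for (i = 0; status && i < ARRAY_SIZE(demo_ue_names);
	     status >>= 1, i++)
		if (status & 1)
			printk(KERN_ERR "UE bit %u (%s) set\n", i,
				demo_ue_names[i]);
}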

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}

	if (!adapter->ue_detected)
		be_detect_dump_ue(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
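
/*
 * A minimal sketch of the self-rearming delayed-work pattern used by
 * be_worker() above: do the periodic work, then schedule yourself
 * again (here once per second).  struct demo_dev and its field names
 * are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct delayed_work work;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_dev *dev =
		container_of(work, struct demo_dev, work.work);

	/* ... periodic housekeeping ... */

	schedule_delayed_work(&dev->work, msecs_to_jiffies(1000));
}
/* Started elsewhere with INIT_DELAYED_WORK(&dev->work, demo_worker)
 * followed by an initial schedule_delayed_work() call.
 */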

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}
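
/*
 * A minimal sketch of the retry idiom in be_msix_enable(), valid for
 * the legacy pci_enable_msix() API of this era: a positive return
 * value is the number of vectors the platform can actually provide,
 * so the driver retries with that smaller count.  The caller is
 * assumed to have filled in entries[i].entry; names are hypothetical.
 */
static int demo_enable_msix(struct pci_dev *pdev,
			struct msix_entry *entries, int want, int min)
{
	int ret;

	ret = pci_enable_msix(pdev, entries, want);
	if (ret > 0 && ret >= min)	/* only 'ret' vectors available */
		ret = pci_enable_msix(pdev, entries, ret);

	return ret;			/* 0 on success */
}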

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
		be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
		void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}
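
/*
 * A minimal, hypothetical sketch of the partial-unwind idiom used in
 * err_msix above: when registration of queue i fails, free only the
 * IRQs of queues 0..i-1 by walking back down the array.
 */
struct demo_q;
static int demo_request_one(struct demo_q *q);
static void demo_free_one(struct demo_q *q);

static int demo_request_all(struct demo_q *qs, int n)
{
	int i, status;

	for (i = 0; i < n; i++) {
		status = demo_request_one(&qs[i]);
		if (status)
			goto unwind;
	}
	return 0;
unwind:
	while (--i >= 0)
		demo_free_one(&qs[i]);
	return status;
}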

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from
 * the seed.  These addresses are programmed into the ASIC by the PF,
 * and the VF driver queries for its MAC address during probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
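
/*
 * A sketch of how a seed MAC can be derived from the PF MAC with
 * jhash, as the comment above be_vf_eth_addr_config() describes.
 * be_vf_eth_addr_generate() itself is not shown in this file, so this
 * is an illustration, not the driver's actual derivation: hash the PF
 * address and build a locally administered unicast address from it.
 */
#include <linux/jhash.h>

static void demo_vf_mac_seed(const u8 *pf_mac, u8 *seed)
{
	u32 h = jhash(pf_mac, ETH_ALEN, 0);

	seed[0] = 0x02;			/* locally administered, unicast */
	seed[1] = pf_mac[1];
	seed[2] = pf_mac[2];
	seed[3] = (h >> 16) & 0xff;
	seed[4] = (h >> 8) & 0xff;
	seed[5] = h & 0xff;		/* VFs increment this byte */
}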

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);
	}
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
			if (status) {
				dev_err(&adapter->pdev->dev,
				"Interface Create failed for VF %d\n", vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else if (!be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	adapter->link_speed = -1;

	return 0;

mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] =	{"*** SE FLAS",
				"H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	struct flash_comp *pflashcomp;
	int num_comp;

	struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = 9;
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = 8;
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}
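
/*
 * A minimal sketch of the chunking scheme in be_flash_data(): stream
 * one image through the 32KB command buffer, issuing OPER_SAVE for
 * every chunk except the last, which uses OPER_FLASH to commit.  The
 * demo_* wrapper is illustrative; the command helper and op constants
 * are the driver's own.
 */
static int demo_flash_image(struct be_adapter *adapter,
			struct be_dma_mem *flash_cmd,
			const u8 *p, u32 total, int optype)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 num;
	int status;

	while (total) {
		num = min_t(u32, total, 32 * 1024);
		total -= num;
		memcpy(req->params.data_buf, p, num);
		p += num;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				total ? FLASHROM_OPER_SAVE :
					FLASHROM_OPER_FLASH, num);
		if (status)
			return status;
	}
	return 0;
}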

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}
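
/*
 * A minimal sketch of the request_firmware() lifecycle that
 * be_load_fw() follows: ask the firmware loader for a named blob,
 * consume fw->data (fw->size bytes), and always release it.  The
 * blob name is a placeholder.
 */
#include <linux/firmware.h>

static int demo_load_blob(struct device *dev)
{
	const struct firmware *fw;
	int status;

	status = request_firmware(&fw, "demo/blob.bin", dev);
	if (status)
		return status;	/* file missing or loader error */

	/* ... validate headers, then parse fw->data / fw->size ... */

	release_firmware(fw);
	return 0;
}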

static struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_multicast_list,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_register = be_vlan_register,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
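
/*
 * A minimal, hypothetical sketch of the BAR-mapping pattern in
 * be_map_pci_bars(): translate a PCI BAR into an uncached,
 * CPU-visible virtual address; the caller checks for NULL and
 * iounmap()s on teardown.
 */
static void __iomem *demo_map_bar(struct pci_dev *pdev, int bar)
{
	return ioremap_nocache(pci_resource_start(pdev, bar),
			pci_resource_len(pdev, bar));
}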

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
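
/*
 * A minimal sketch of the over-allocate-and-align trick used for the
 * mailbox above: the hardware wants a 16-byte-aligned DMA buffer, so
 * allocate size + 16 and align both the virtual and bus addresses
 * with PTR_ALIGN, exactly as be_ctrl_init() does.  Names are
 * hypothetical.
 */
static int demo_alloc_aligned16(struct pci_dev *pdev, size_t size,
			struct be_dma_mem *raw, struct be_dma_mem *aligned)
{
	raw->size = size + 16;
	raw->va = pci_alloc_consistent(pdev, raw->size, &raw->dma);
	if (!raw->va)
		return -ENOMEM;

	aligned->size = size;
	aligned->va = PTR_ALIGN(raw->va, 16);
	aligned->dma = PTR_ALIGN(raw->dma, 16);
	memset(aligned->va, 0, size);
	return 0;
}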

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
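
/*
 * A minimal sketch of the DMA-mask fallback in be_probe(): prefer a
 * 64-bit mask (and advertise NETIF_F_HIGHDMA), fall back to 32-bit,
 * and fail only if neither mask is accepted.
 */
static int demo_set_dma_mask(struct pci_dev *pdev, struct net_device *netdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		netdev->features |= NETIF_F_HIGHDMA;
		return 0;
	}
	return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}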

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);