/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */

#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	/* ... bit-name strings ... */
};

/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	/* ... bit-name strings ... */
};
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
				port_stats->rx_input_fifo_overflow +
				rxf_stats->rx_drops_no_pbuf;
}
void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* Mbits/sec */

	return rate;
}
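/*
 * Example: 250,000,000 bytes observed over 2*HZ ticks (2 seconds) is
 * 125,000,000 bytes/sec == 1,000,000,000 bits/sec, so be_calc_rate()
 * returns 1000 (Mbits/sec). Callers only invoke this after at least
 * 2*HZ ticks have elapsed, so ticks / HZ is never zero.
 */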
static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}
static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);

	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
	} else {
		netif_receive_skb(skb);
	}
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
}
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					  compl);
	}
}
static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					  compl);
	}
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vid = swab16(rxcp->vid);

		if ((adapter->pvid == rxcp->vid) &&
			!adapter->vlan_tag[rxcp->vid])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
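/*
 * A single compound page of big_page_size is shared by several receive
 * fragments: each additional posted fragment takes its own page reference
 * (get_page), the page is DMA-mapped once, and only the fragment marked
 * last_page_user triggers the dma_unmap_page() in get_rx_page_info() when
 * it is reaped.
 */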
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;
	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
							BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

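/* Undo be_setup(): destroy the queues and interfaces in reverse creation
 * order, then tell the firmware the driver is done issuing commands.
 */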
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

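/* be_flash_redboot(): flash the redboot section only when the CRC in the
 * last 4 bytes of the new image differs from the CRC read back from flash.
 */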
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;

	return true;
}

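/* be_flash_data(): walk the per-generation component table and write each
 * section of the UFI image to the flash ROM in 32KB chunks; intermediate
 * chunks are buffered with FLASHROM_OPER_SAVE and the final chunk commits
 * with FLASHROM_OPER_FLASH.
 */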
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

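/* ethtool flash entry point (reached e.g. via "ethtool -f <iface> <file>"):
 * fetch the UFI file with request_firmware(), check that it matches the
 * adapter generation and stream it to flash through a DMA-able command
 * buffer.
 */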
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status, i = 0, num_imgs = 0;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

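/* Set up netdev features, default flow-control settings and the netdev/
 * ethtool ops, and register one NAPI context per rx queue plus a combined
 * tx/mcc context.
 */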
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	if (be_multi_rxq(adapter))
		netdev->features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

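/* Map the BARs the driver uses: CSR (PF only), doorbell and PCI config
 * space. Which BAR holds what varies with the adapter generation and with
 * PF vs VF; Lancer exposes only a doorbell BAR.
 */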
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else {
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
	}

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
}

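/* Map the BARs and allocate the 16-byte-aligned mailbox and multicast
 * command DMA buffers; also initialize the locks that serialize mailbox
 * and MCC access.
 */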
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					mc_cmd_mem->size, &mc_cmd_mem->dma,
					GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

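/* Derive the controller generation from the PCI device id; OC_DEVICE_ID3
 * (Lancer) is validated through the SLI_INTF register instead.
 */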
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

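/* Poll the SLIPORT status register until the firmware reports ready or
 * the timeout expires.
 */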
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

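/* If the firmware reports an error that requires a reset, trigger one
 * through SLIPORT_CONTROL and wait for the port to come back ready.
 */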
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
				sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

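/* PM suspend: arm WoL if configured, quiesce the interface, read back the
 * negotiated flow-control settings so be_setup() can reapply them on
 * resume, and power the device down.
 */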
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

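/* EEH (PCI error recovery) callbacks: quiesce and detach on error,
 * re-POST the card on slot reset and rebuild driver state on resume.
 */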
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

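/* Module init: validate rx_frag_size (falling back to 2048) and register
 * the PCI driver. Example load (module name assumed to be be2net):
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 */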
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);