/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	unsigned int offset = 0;
	int i;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}
#ifdef CONFIG_RFS_ACCEL
struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
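
/* Deferred work for an accelerated RFS filter: build an ETH + IPv4 + TCP
 * flow steering rule for the filter's 4-tuple, detach any rule previously
 * attached for this filter, and attach the new rule pointing at the QP of
 * the target RX ring.
 */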
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp = {
		.id = MLX4_NET_TRANS_RULE_ID_TCP,
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
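
/* Hash the flow 4-tuple into priv->filter_hash so existing filters can be
 * looked up quickly on the RX flow steering path.
 */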
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
		     u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
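
/* ndo_rx_flow_steer() callback: the RFS core asks the driver to steer a flow
 * to the RX queue serving the consuming CPU. Only non-fragmented IPv4/TCP is
 * supported; the actual rule programming is deferred to mlx4_en_filter_work()
 * on the driver workqueue.
 */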
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];
	if (ip->protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else {
			last_filter = filter;
		}
		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
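
/* Attach a unicast steering entry for @mac to QP @qpn, using whichever
 * steering mode (B0 unicast attach or device-managed flow steering) the
 * device runs in.
 */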
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
	       priv->dev->dev_addr);
	mlx4_unregister_mac(dev, priv->port, mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		struct hlist_head *bucket;
		unsigned int mac_hash;

		mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
		bucket = &priv->mac_hash[mac_hash];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac,
						    priv->dev->dev_addr)) {
				en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
				       priv->port, priv->dev->dev_addr, qpn);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_qp_release_range(dev, qpn, 1);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				break;
			}
		}
	}
}
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				memcpy(entry->mac, new_mac, ETH_ALEN);
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	queue_work(mdev->workqueue, &priv->mac_task);

	return 0;
}
static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  priv->dev->dev_addr, priv->prev_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
		memcpy(priv->prev_mac, priv->dev->dev_addr,
		       sizeof(priv->prev_mac));
	} else {
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}
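
/* Diff the freshly cached multicast list (src) against the list currently
 * programmed in hardware (dst) and mark each entry MCLIST_REM, MCLIST_ADD
 * or MCLIST_NONE so the caller knows what to detach and attach.
 */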
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst;
	 * these are the entries that are not found in src.
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst;
	 * mark them as needing to be added.
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_PROMISC_UPLINK);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn, 1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Disable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed disabling VLAN filter\n");
	}
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_PROMISC_UPLINK);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}

	/* Enable port VLAN filter */
	err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
	if (err)
		en_err(priv, "Failed enabling VLAN filter\n");
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");
			}
		}
	}
}
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
				found = true;

			if (!found) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_en_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}
static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
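
/* Adaptive RX coalescing: once per sample interval, derive the per-ring
 * packet rate and average packet size, then interpolate cq->moder_time
 * between rx_usecs_low and rx_usecs_high accordingly.
 */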
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
		if (err)
			en_dbg(HW, priv, "Could not update stats\n");

		mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
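
/* Bring the port up: activate RX CQs and rings, set up the base QP, RSS and
 * the drop QP, activate TX CQs and rings, configure the port in firmware,
 * attach the broadcast address and kick the rx_mode task. Any failure
 * unwinds what was already set up, in reverse order.
 */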
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}
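
/* Tear the port down: stop the TX queues, drop promiscuous and multicast
 * state, detach steering rules, deactivate TX/RX rings and CQs, release the
 * base QP and close the port in firmware.
 */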
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_UPLINK);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_ALL_MULTI);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Remove flow steering rules for the port*/
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}
static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
	if (!priv->dev->rx_cpu_rmap)
		goto err;
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}
static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	mlx4_en_update_loopback_state(netdev, features);

	return 0;
}
static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 flags)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int err;

	if (!mlx4_is_mfunc(mdev))
		return -EOPNOTSUPP;

	/* Hardware does not support aging addresses, allow only
	 * permanent addresses if ndm_state is given
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		en_info(priv, "Add FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
static int mlx4_en_fdb_del(struct ndmsg *ndm,
			   struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int err;

	if (!mlx4_is_mfunc(mdev))
		return -EOPNOTSUPP;

	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		en_info(priv, "Del FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);
	else
		err = -EINVAL;

	return err;
}
static int mlx4_en_fdb_dump(struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct net_device *dev, int idx)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;

	if (mlx4_is_mfunc(mdev))
		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);

	return idx;
}
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_fdb_add		= mlx4_en_fdb_add,
	.ndo_fdb_del		= mlx4_en_fdb_del,
	.ndo_fdb_dump		= mlx4_en_fdb_dump,
};
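
/* Allocate and register the net_device for one port: set up the private
 * data, work items, features and rings, register with the networking stack
 * and initialize the port in firmware.
 */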
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	}

	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}