/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

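/* Partition the Tx queues into per-priority blocks: each user priority (UP)
 * gets num_tx_rings_p_up consecutive rings. Invoked via .ndo_setup_tc;
 * up == 0 clears the traffic-class configuration.
 */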
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}

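/* Accelerated RFS (CONFIG_RFS_ACCEL) support: one mlx4_en_filter per steered
 * TCP/IPv4 flow, kept both on priv->filters (for expiry scanning) and in
 * priv->filter_hash (for lookup by 4-tuple).
 */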
#ifdef CONFIG_RFS_ACCEL
struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

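/* Deferred work that programs one RFS filter into the HW: builds a
 * device-managed steering rule from ETH + IPv4 + TCP specs, detaches any
 * stale rule registered for this filter, then attaches the new rule
 * pointing at the RSS QP of the target rx queue.
 */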
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp = {
		.id = MLX4_NET_TRANS_RULE_ID_TCP,
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

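/* Hash the flow 4-tuple into one of the filter_hash buckets: the two ports
 * and the XORed IP addresses are folded into a single long for hash_long().
 */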
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
		     u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    __be16 src_port, __be16 dst_port)
{
	struct hlist_node *elem;
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter, elem,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

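/* .ndo_rx_flow_steer callback, called by the RFS core in atomic context:
 * the filter is therefore allocated with GFP_ATOMIC and the actual HW
 * programming is deferred to filter->work. Only non-fragmented TCP/IPv4
 * is supported. Returns the driver-local filter id that the core later
 * passes back through rps_may_expire_flow().
 */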
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	if (ip->protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

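/* Scan up to MLX4_EN_FILTER_EXPIRY_QUOTA filters per invocation and free
 * those the RFS core no longer needs; the list head is then rotated past
 * the last kept filter so successive scans proceed round-robin.
 */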
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

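/* Attach a unicast steering rule for @mac to QP *@qpn, according to the
 * steering mode the device runs in: B0 (GID-based unicast attach) or
 * device-managed flow steering (an ETH spec rule whose id is returned in
 * *@reg_id). A0 mode steers by MAC index only, so the default case is an
 * error here.
 */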
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);
qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
	       priv->dev->dev_addr);
	mlx4_unregister_mac(dev, priv->port, mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct mlx4_mac_entry *entry;
		struct hlist_node *n, *tmp;
		struct hlist_head *bucket;
		unsigned int mac_hash;

		mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
		bucket = &priv->mac_hash[mac_hash];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac,
						    priv->dev->dev_addr)) {
				en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
				       priv->port, priv->dev->dev_addr, qpn);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_qp_release_range(dev, qpn, 1);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				break;
			}
		}
	}
}

static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *n, *tmp;
		u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  priv->dev->dev_addr, priv->prev_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
		memcpy(priv->prev_mac, priv->dev->dev_addr,
		       sizeof(priv->prev_mac));
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_PROMISC_UPLINK);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Disable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed disabling VLAN filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_PROMISC_UPLINK);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}

	/* Enable port VLAN filter */
	err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
	if (err)
		en_err(priv, "Failed enabling VLAN filter\n");
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *n, *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
				found = true;

			if (!found) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, n, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_en_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				kfree(entry);
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate that it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
		if (err)
			en_dbg(HW, priv, "Could not update stats\n");

		mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}

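/* Tear down everything mlx4_en_start_port() set up, in reverse: stop the
 * TX queues, drop promiscuous state, detach multicast and ethtool steering
 * rules, free rings/CQs and finally CLOSE_PORT.
 */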
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_UPLINK);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_ALL_MULTI);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Remove flow steering rules for the port*/
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
	if (!priv->dev->rx_cpu_rmap)
		goto err;
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	mlx4_en_update_loopback_state(netdev, features);

	return 0;
}

static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 flags)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int err;

	if (!mlx4_is_mfunc(mdev))
		return -EOPNOTSUPP;

	/* Hardware does not support aging addresses, allow only
	 * permanent addresses if ndm_state is given
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		en_info(priv, "Add FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}

static int mlx4_en_fdb_del(struct ndmsg *ndm,
			   struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int err;

	if (!mlx4_is_mfunc(mdev))
		return -EOPNOTSUPP;

	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		en_info(priv, "Del FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);
	else
		err = -EINVAL;

	return err;
}

static int mlx4_en_fdb_dump(struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct net_device *dev, int idx)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;

	if (mlx4_is_mfunc(mdev))
		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);

	return idx;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_fdb_add		= mlx4_en_fdb_add,
	.ndo_fdb_del		= mlx4_en_fdb_del,
	.ndo_fdb_dump		= mlx4_en_fdb_dump,
};

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	}

	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}