/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <net/busy_poll.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}
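/* Note on mlx4_en_setup_tc(): each user priority (UP) is backed by a
 * fixed-size, contiguous block of Tx queues.  As a purely hypothetical
 * example, with num_tx_rings_p_up == 8 and up == 4, UP 0 maps to queues
 * 0-7, UP 1 to 8-15, UP 2 to 16-23 and UP 3 to 24-31.
 */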
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
	if (likely(done))
		rx_ring->cleaned += done;

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
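/* Note on the busy-poll path above: mlx4_en_cq_lock_poll() arbitrates
 * between the socket busy-poll caller and the regular NAPI handler for
 * the same CQ.  When the lock is already held by NAPI the caller backs
 * off instead of processing completions concurrently, and the budget of
 * 4 passed to mlx4_en_process_rx_cq() keeps each poll attempt short.
 */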
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return -EPROTONOSUPPORT;
	}
}
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id < 0) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
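/* The bucket key above mixes both L4 ports and both IPv4 addresses:
 * l = src_port | (dst_port << 2), then l ^= src_ip ^ dst_ip, and
 * hash_long() folds the result down to MLX4_EN_FILTER_HASH_SHIFT bits.
 * Flows that differ in any field of the 4-tuple therefore tend to land
 * in different buckets, keeping the per-bucket walk in
 * mlx4_en_filter_find() short.
 */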
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
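/* Expiry notes: at most MLX4_EN_FILTER_EXPIRY_QUOTA filters are examined
 * per invocation, and a filter is only freed once it has been activated,
 * has no work pending, and rps_may_expire_flow() confirms the RFS core no
 * longer steers that flow.  Moving the list head behind the last surviving
 * filter rotates priv->filters so the next invocation resumes where this
 * one stopped rather than rescanning the same entries.
 */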
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
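/* mlx4_en_u64_to_mac() unpacks the low 48 bits of src_mac in big-endian
 * byte order: for a made-up value of src_mac == 0x0002c9112233 the
 * resulting dst_mac bytes are 00:02:c9:11:22:33, and the two trailing pad
 * bytes of the ETH_ALEN + 2 buffer are cleared.  mlx4_en_mac_to_u64()
 * further below is the inverse transformation.
 */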
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		struct hlist_head *bucket;
		unsigned int i;

		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
			bucket = &priv->mac_hash[i];
			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
				       entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);

				mlx4_unregister_mac(dev, priv->port, mac);
				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
			}
		}

		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				memcpy(entry->mac, new_mac, ETH_ALEN);
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  priv->dev->dev_addr, priv->prev_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
		memcpy(priv->prev_mac, priv->dev->dev_addr,
		       sizeof(priv->prev_mac));
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	return err;
}
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);

	mutex_lock(&mdev->state_lock);
	err = mlx4_en_do_set_mac(priv);
	mutex_unlock(&mdev->state_lock);

	return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}
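/* update_mclist_flags() is a simple O(n*m) diff between the current HW
 * list (dst) and the freshly cached OS list (src): entries present only
 * in dst are marked MCLIST_REM, entries present in both are marked
 * MCLIST_NONE, and entries present only in src are duplicated with
 * kmemdup() and marked MCLIST_ADD, so mlx4_en_do_multicast() can attach
 * or detach exactly the addresses that changed.
 */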
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Disable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed disabling VLAN filter\n");
	}
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}

	/* Enable port VLAN filter */
	err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
	if (err)
		en_err(priv, "Failed enabling VLAN filter\n");
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");
			}
		}
	}
}
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
				found = true;

			if (!found) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_en_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}
static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
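/* With the defaults set above, a completion event is generally raised
 * after either moder_cnt packets (MLX4_EN_RX_COAL_TARGET) or moder_time
 * microseconds (MLX4_EN_RX_COAL_TIME), whichever comes first.  The
 * per-ring last_moder_* counters are also reset here so the adaptive
 * algorithm below starts from a clean sample window.
 */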
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate that it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}
/* mlx4_en_service_task - Run service task for tasks that needed to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port*/
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		local_bh_disable();
		while (!mlx4_en_cq_lock_napi(cq)) {
			pr_info("CQ %d locked\n", i);
			mdelay(1);
		}
		local_bh_enable();

		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
			msleep(1);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);
	}
}
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
	}
}
static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}


static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;
	int node;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE, node))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	if (priv->mdev->dev->caps.comp_pool) {
		priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
		if (!priv->dev->rx_cpu_rmap)
			goto err;
	}
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	return -ENOMEM;
}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}
static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
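/* The hardware cannot filter timestamps per PTP packet class, so every
 * supported rx_filter request above is coarsened to HWTSTAMP_FILTER_ALL
 * before the config is written back to user space; callers can detect
 * this by comparing the returned config with the one they requested.
 */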
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_ioctl(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	mlx4_en_update_loopback_state(netdev, features);

	return 0;
}
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;
	u64 mac_u64 = mlx4_en_mac_to_u64(mac);

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= mlx4_en_low_latency_recv,
#endif
};

static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	u64 mac_u64;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (mlx4_is_slave(priv->mdev->dev)) {
			eth_hw_addr_random(dev);
			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
			mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
			mdev->dev->caps.def_mac[priv->port] = mac_u64;
		} else {
			en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
			       priv->port, dev->dev_addr);
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}