net/mlx4_en: Fix a race when closing TX queue
deliverable/linux.git: drivers/net/ethernet/mellanox/mlx4/en_netdev.c

/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

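/*
 * aRFS filter lifecycle: mlx4_en_filter_rfs() allocates a filter entry in
 * atomic context, mlx4_en_filter_work() attaches the flow steering rule from
 * the driver workqueue, and mlx4_en_filter_rfs_expire() frees entries once
 * rps_may_expire_flow() reports the flow as no longer active.
 */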
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp = {
		.id = MLX4_NET_TRANS_RULE_ID_TCP,
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp.list, &rule.list);

	mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
		     u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    __be16 src_port, __be16 dst_port)
{
	struct hlist_node *elem;
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter, elem,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

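/*
 * ndo_rx_flow_steer() callback: parses the IPv4/TCP 4-tuple of the skb,
 * reuses or allocates a filter for it and queues mlx4_en_filter_work() to
 * (re)attach the HW rule towards rxq_index. Returns the driver filter id
 * later passed to rps_may_expire_flow(), or a negative errno for
 * unsupported packets.
 */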
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	if (ip->protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			en_err(priv, "failed to allocate multicast list\n");
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_mc_list));
			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

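/*
 * Deferred rx-mode work. Runs on the driver workqueue under mdev->state_lock:
 * applies IFF_PROMISC/IFF_ALLMULTI according to the device steering mode
 * (A0, B0 or device-managed flow steering), then syncs the cached multicast
 * list to the port filter, attaching/detaching the addresses whose
 * MCLIST_ADD/MCLIST_REM flags were computed by update_mclist_flags().
 */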
static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring multicast change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode */
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_UPLINK);
				if (err)
					en_err(priv, "Failed enabling promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_unicast_promisc_add(mdev->dev,
							       priv->base_qpn,
							       priv->port);
				if (err)
					en_err(priv, "Failed enabling unicast promiscuous mode\n");

				/* Add the default qp number as multicast
				 * promisc
				 */
				if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
					err = mlx4_multicast_promisc_add(mdev->dev,
									 priv->base_qpn,
									 priv->port);
					if (err)
						en_err(priv, "Failed enabling multicast promiscuous mode\n");
					priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
				}
				break;

			case MLX4_STEERING_MODE_A0:
				err = mlx4_SET_PORT_qpn_calc(mdev->dev,
							     priv->port,
							     priv->base_qpn,
							     1);
				if (err)
					en_err(priv, "Failed enabling promiscuous mode\n");
				break;
			}

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling multicast filter\n");

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_remove(mdev->dev,
							     priv->port,
							     MLX4_FS_PROMISC_UPLINK);
			if (err)
				en_err(priv, "Failed disabling promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_remove(mdev->dev,
							  priv->base_qpn,
							  priv->port);
			if (err)
				en_err(priv, "Failed disabling unicast promiscuous mode\n");
			/* Disable Multicast promisc */
			if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				if (err)
					en_err(priv, "Failed disabling multicast promiscuous mode\n");
				priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn, 0);
			if (err)
				en_err(priv, "Failed disabling promiscuous mode\n");
			break;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

			}
		}
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

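/*
 * Adaptive RX interrupt moderation. Sampled once per sample_interval: for
 * each ring the packet rate is compared against pkt_rate_low/high, and the
 * CQ moderation time is moved between rx_usecs_low and rx_usecs_high by
 * linear interpolation - e.g. a rate halfway between the two rate thresholds
 * yields a moder_time halfway between the two usec bounds.
 */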
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
		if (err)
			en_dbg(HW, priv, "Could not update stats\n");

		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

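/*
 * Bring-up order: activate RX rings and CQs, get the port ETH QP, set up RSS
 * steering and the drop QP, activate TX CQs/rings, configure and init the
 * port, attach the broadcast address, then mark port_up and wake the TX
 * queues. Error paths unwind in reverse via the tx_err/rss_err/mac_err/cq_err
 * labels.
 */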
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_get_eth_qp(mdev->dev, priv->port,
			      priv->mac, &priv->base_qpn);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}

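/*
 * Teardown path: stop the TX queues under netif_tx_lock_bh() to synchronize
 * with the transmit routine, mark the port as down, detach promiscuous and
 * multicast state, then release the TX/RX rings and CQs and close the port.
 */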
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_UPLINK);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_ALL_MULTI);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	int i;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		for (i = 0; i < priv->tx_ring_num; i++)
			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}


static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;

	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}


void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_multicast,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
};

int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_RX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}