{NET,IB}/mlx4: Add rmap support to mlx4_assign_eq
deliverable/linux.git: drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1 /*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34 #include <linux/etherdevice.h>
35 #include <linux/tcp.h>
36 #include <linux/if_vlan.h>
37 #include <linux/delay.h>
38 #include <linux/slab.h>
39
40 #include <linux/mlx4/driver.h>
41 #include <linux/mlx4/device.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/cq.h>
44
45 #include "mlx4_en.h"
46 #include "en_port.h"
47
48 static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
49 {
50 struct mlx4_en_priv *priv = netdev_priv(dev);
51 int i;
52 unsigned int q, offset = 0;
53
54 if (up && up != MLX4_EN_NUM_UP)
55 return -EINVAL;
56
57 netdev_set_num_tc(dev, up);
58
59 /* Partition Tx queues evenly among UPs (up == 0 clears the TC config) */
60 q = up ? priv->tx_ring_num / up : 0;
61 for (i = 0; i < up; i++) {
62 netdev_set_tc_queue(dev, i, q, offset);
63 offset += q;
64 }
65
66 return 0;
67 }
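/* Worked example (ring counts assumed for illustration): with
 * priv->tx_ring_num == 8 and up == MLX4_EN_NUM_UP == 8, q = 1 and each
 * UP owns exactly one TX queue; with 16 rings, q = 2 and UP i owns
 * queues [2*i, 2*i+1], since offset advances by q per UP.
 */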
68
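/* VLAN offload keeps two pieces of state in sync: the active_vlans
 * bitmap, pushed to the port VLAN filter via mlx4_SET_VLAN_FLTR()
 * whenever both device and port are up, and the per-port VLAN
 * register/cache maintained through mlx4_register_vlan() and
 * mlx4_unregister_vlan().
 */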
69 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
70 {
71 struct mlx4_en_priv *priv = netdev_priv(dev);
72 struct mlx4_en_dev *mdev = priv->mdev;
73 int err;
74 int idx;
75
76 en_dbg(HW, priv, "adding VLAN:%d\n", vid);
77
78 set_bit(vid, priv->active_vlans);
79
80 /* Add VID to port VLAN filter */
81 mutex_lock(&mdev->state_lock);
82 if (mdev->device_up && priv->port_up) {
83 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
84 if (err)
85 en_err(priv, "Failed configuring VLAN filter\n");
86 }
87 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
88 en_err(priv, "failed adding vlan %d\n", vid);
89 mutex_unlock(&mdev->state_lock);
90
91 return 0;
92 }
93
94 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
95 {
96 struct mlx4_en_priv *priv = netdev_priv(dev);
97 struct mlx4_en_dev *mdev = priv->mdev;
98 int err;
99 int idx;
100
101 en_dbg(HW, priv, "Killing VID:%d\n", vid);
102
103 clear_bit(vid, priv->active_vlans);
104
105 /* Remove VID from port VLAN filter */
106 mutex_lock(&mdev->state_lock);
107 if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
108 mlx4_unregister_vlan(mdev->dev, priv->port, idx);
109 else
110 en_err(priv, "could not find vid %d in cache\n", vid);
111
112 if (mdev->device_up && priv->port_up) {
113 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
114 if (err)
115 en_err(priv, "Failed configuring VLAN filter\n");
116 }
117 mutex_unlock(&mdev->state_lock);
118
119 return 0;
120 }
121
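/* Pack a 6-byte Ethernet address into the low 48 bits of a u64,
 * most significant byte first, which is how the rest of the driver
 * carries MACs (e.g. priv->mac). For example, 00:11:22:33:44:55
 * becomes 0x001122334455.
 */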
122 u64 mlx4_en_mac_to_u64(u8 *addr)
123 {
124 u64 mac = 0;
125 int i;
126
127 for (i = 0; i < ETH_ALEN; i++) {
128 mac <<= 8;
129 mac |= addr[i];
130 }
131 return mac;
132 }
133
134 static int mlx4_en_set_mac(struct net_device *dev, void *addr)
135 {
136 struct mlx4_en_priv *priv = netdev_priv(dev);
137 struct mlx4_en_dev *mdev = priv->mdev;
138 struct sockaddr *saddr = addr;
139
140 if (!is_valid_ether_addr(saddr->sa_data))
141 return -EADDRNOTAVAIL;
142
143 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
144 priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
145 queue_work(mdev->workqueue, &priv->mac_task);
146 return 0;
147 }
148
149 static void mlx4_en_do_set_mac(struct work_struct *work)
150 {
151 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
152 mac_task);
153 struct mlx4_en_dev *mdev = priv->mdev;
154 int err = 0;
155
156 mutex_lock(&mdev->state_lock);
157 if (priv->port_up) {
158 /* Remove old MAC and insert the new one */
159 err = mlx4_replace_mac(mdev->dev, priv->port,
160 priv->base_qpn, priv->mac);
161 if (err)
162 en_err(priv, "Failed changing HW MAC address\n");
163 } else
164 en_dbg(HW, priv, "Port is down while "
165 "registering mac, exiting...\n");
166
167 mutex_unlock(&mdev->state_lock);
168 }
169
170 static void mlx4_en_clear_list(struct net_device *dev)
171 {
172 struct mlx4_en_priv *priv = netdev_priv(dev);
173 struct mlx4_en_mc_list *tmp, *mc_to_del;
174
175 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
176 list_del(&mc_to_del->list);
177 kfree(mc_to_del);
178 }
179 }
180
181 static void mlx4_en_cache_mclist(struct net_device *dev)
182 {
183 struct mlx4_en_priv *priv = netdev_priv(dev);
184 struct netdev_hw_addr *ha;
185 struct mlx4_en_mc_list *tmp;
186
187 mlx4_en_clear_list(dev);
188 netdev_for_each_mc_addr(ha, dev) {
189 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
190 if (!tmp) {
191 en_err(priv, "failed to allocate multicast list\n");
192 mlx4_en_clear_list(dev);
193 return;
194 }
195 memcpy(tmp->addr, ha->addr, ETH_ALEN);
196 list_add_tail(&tmp->list, &priv->mc_list);
197 }
198 }
199
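/* Two-pass diff between the cached list (dst) and the freshly read
 * list (src): the first pass marks dst entries absent from src as
 * MCLIST_REM, the second marks entries present in both as MCLIST_NONE
 * and copies src-only entries into dst as MCLIST_ADD. The caller then
 * walks dst and attaches/detaches steering entries per action flag.
 */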
200 static void update_mclist_flags(struct mlx4_en_priv *priv,
201 struct list_head *dst,
202 struct list_head *src)
203 {
204 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
205 bool found;
206
207 /* Find all the entries that should be removed from dst;
208 * these are the entries that are not found in src.
209 */
210 list_for_each_entry(dst_tmp, dst, list) {
211 found = false;
212 list_for_each_entry(src_tmp, src, list) {
213 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
214 found = true;
215 break;
216 }
217 }
218 if (!found)
219 dst_tmp->action = MCLIST_REM;
220 }
221
222 /* Add entries that exist in src but not in dst
223 * and mark them as needing to be added
224 */
225 list_for_each_entry(src_tmp, src, list) {
226 found = false;
227 list_for_each_entry(dst_tmp, dst, list) {
228 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
229 dst_tmp->action = MCLIST_NONE;
230 found = true;
231 break;
232 }
233 }
234 if (!found) {
235 new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
236 GFP_KERNEL);
237 if (!new_mc) {
238 en_err(priv, "Failed to allocate current multicast list\n");
239 return;
240 }
241 memcpy(new_mc, src_tmp,
242 sizeof(struct mlx4_en_mc_list));
243 new_mc->action = MCLIST_ADD;
244 list_add_tail(&new_mc->list, dst);
245 }
246 }
247 }
248
249 static void mlx4_en_set_multicast(struct net_device *dev)
250 {
251 struct mlx4_en_priv *priv = netdev_priv(dev);
252
253 if (!priv->port_up)
254 return;
255
256 queue_work(priv->mdev->workqueue, &priv->mcast_task);
257 }
258
259 static void mlx4_en_do_set_multicast(struct work_struct *work)
260 {
261 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
262 mcast_task);
263 struct mlx4_en_dev *mdev = priv->mdev;
264 struct net_device *dev = priv->dev;
265 struct mlx4_en_mc_list *mclist, *tmp;
266 u64 mcast_addr = 0;
267 u8 mc_list[16] = {0};
268 int err = 0;
269
270 mutex_lock(&mdev->state_lock);
271 if (!mdev->device_up) {
272 en_dbg(HW, priv, "Card is not up, "
273 "ignoring multicast change.\n");
274 goto out;
275 }
276 if (!priv->port_up) {
277 en_dbg(HW, priv, "Port is down, "
278 "ignoring multicast change.\n");
279 goto out;
280 }
281
282 if (!netif_carrier_ok(dev)) {
283 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
284 if (priv->port_state.link_state) {
285 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
286 netif_carrier_on(dev);
287 en_dbg(LINK, priv, "Link Up\n");
288 }
289 }
290 }
291
292 /*
293 * Promiscuous mode: disable all filters
294 */
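/* How promiscuous mode is entered depends on the steering mode the
 * core driver negotiated: device-managed flow steering installs an
 * uplink promisc rule, B0 steering registers the default QP as a
 * unicast (and multicast) promisc QP, and A0 steering falls back to
 * SET_PORT with the promisc bit set in the qpn calculation.
 */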
295
296 if (dev->flags & IFF_PROMISC) {
297 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
298 if (netif_msg_rx_status(priv))
299 en_warn(priv, "Entering promiscuous mode\n");
300 priv->flags |= MLX4_EN_FLAG_PROMISC;
301
302 /* Enable promiscuous mode */
303 switch (mdev->dev->caps.steering_mode) {
304 case MLX4_STEERING_MODE_DEVICE_MANAGED:
305 err = mlx4_flow_steer_promisc_add(mdev->dev,
306 priv->port,
307 priv->base_qpn,
308 MLX4_FS_PROMISC_UPLINK);
309 if (err)
310 en_err(priv, "Failed enabling promiscuous mode\n");
311 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
312 break;
313
314 case MLX4_STEERING_MODE_B0:
315 err = mlx4_unicast_promisc_add(mdev->dev,
316 priv->base_qpn,
317 priv->port);
318 if (err)
319 en_err(priv, "Failed enabling unicast promiscuous mode\n");
320
321 /* Add the default qp number as multicast
322 * promisc
323 */
324 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
325 err = mlx4_multicast_promisc_add(mdev->dev,
326 priv->base_qpn,
327 priv->port);
328 if (err)
329 en_err(priv, "Failed enabling multicast promiscuous mode\n");
330 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
331 }
332 break;
333
334 case MLX4_STEERING_MODE_A0:
335 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
336 priv->port,
337 priv->base_qpn,
338 1);
339 if (err)
340 en_err(priv, "Failed enabling promiscuous mode\n");
341 break;
342 }
343
344 /* Disable port multicast filter (unconditionally) */
345 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
346 0, MLX4_MCAST_DISABLE);
347 if (err)
348 en_err(priv, "Failed disabling "
349 "multicast filter\n");
350
351 /* Disable port VLAN filter */
352 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
353 if (err)
354 en_err(priv, "Failed disabling VLAN filter\n");
355 }
356 goto out;
357 }
358
359 /*
360 * Not in promiscuous mode
361 */
362
363 if (priv->flags & MLX4_EN_FLAG_PROMISC) {
364 if (netif_msg_rx_status(priv))
365 en_warn(priv, "Leaving promiscuous mode\n");
366 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
367
368 /* Disable promiscuous mode */
369 switch (mdev->dev->caps.steering_mode) {
370 case MLX4_STEERING_MODE_DEVICE_MANAGED:
371 err = mlx4_flow_steer_promisc_remove(mdev->dev,
372 priv->port,
373 MLX4_FS_PROMISC_UPLINK);
374 if (err)
375 en_err(priv, "Failed disabling promiscuous mode\n");
376 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
377 break;
378
379 case MLX4_STEERING_MODE_B0:
380 err = mlx4_unicast_promisc_remove(mdev->dev,
381 priv->base_qpn,
382 priv->port);
383 if (err)
384 en_err(priv, "Failed disabling unicast promiscuous mode\n");
385 /* Disable Multicast promisc */
386 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
387 err = mlx4_multicast_promisc_remove(mdev->dev,
388 priv->base_qpn,
389 priv->port);
390 if (err)
391 en_err(priv, "Failed disabling multicast promiscuous mode\n");
392 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
393 }
394 break;
395
396 case MLX4_STEERING_MODE_A0:
397 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
398 priv->port,
399 priv->base_qpn, 0);
400 if (err)
401 en_err(priv, "Failed disabling promiscuous mode\n");
402 break;
403 }
404
405 /* Enable port VLAN filter */
406 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
407 if (err)
408 en_err(priv, "Failed enabling VLAN filter\n");
409 }
410
411 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
412 if (dev->flags & IFF_ALLMULTI) {
413 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
414 0, MLX4_MCAST_DISABLE);
415 if (err)
416 en_err(priv, "Failed disabling multicast filter\n");
417
418 /* Add the default qp number as multicast promisc */
419 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
420 switch (mdev->dev->caps.steering_mode) {
421 case MLX4_STEERING_MODE_DEVICE_MANAGED:
422 err = mlx4_flow_steer_promisc_add(mdev->dev,
423 priv->port,
424 priv->base_qpn,
425 MLX4_FS_PROMISC_ALL_MULTI);
426 break;
427
428 case MLX4_STEERING_MODE_B0:
429 err = mlx4_multicast_promisc_add(mdev->dev,
430 priv->base_qpn,
431 priv->port);
432 break;
433
434 case MLX4_STEERING_MODE_A0:
435 break;
436 }
437 if (err)
438 en_err(priv, "Failed entering multicast promisc mode\n");
439 priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
440 }
441 } else {
442 /* Disable Multicast promisc */
443 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
444 switch (mdev->dev->caps.steering_mode) {
445 case MLX4_STEERING_MODE_DEVICE_MANAGED:
446 err = mlx4_flow_steer_promisc_remove(mdev->dev,
447 priv->port,
448 MLX4_FS_PROMISC_ALL_MULTI);
449 break;
450
451 case MLX4_STEERING_MODE_B0:
452 err = mlx4_multicast_promisc_remove(mdev->dev,
453 priv->base_qpn,
454 priv->port);
455 break;
456
457 case MLX4_STEERING_MODE_A0:
458 break;
459 }
460 if (err)
461 en_err(priv, "Failed disabling multicast promiscuous mode\n");
462 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
463 }
464
465 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
466 0, MLX4_MCAST_DISABLE);
467 if (err)
468 en_err(priv, "Failed disabling multicast filter\n");
469
470 /* Flush mcast filter and init it with broadcast address */
471 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
472 1, MLX4_MCAST_CONFIG);
473
474 /* Update multicast list - we cache all addresses so they won't
475 * change while HW is updated holding the command semaphore */
476 netif_tx_lock_bh(dev);
477 mlx4_en_cache_mclist(dev);
478 netif_tx_unlock_bh(dev);
479 list_for_each_entry(mclist, &priv->mc_list, list) {
480 mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
481 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
482 mcast_addr, 0, MLX4_MCAST_CONFIG);
483 }
484 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
485 0, MLX4_MCAST_ENABLE);
486 if (err)
487 en_err(priv, "Failed enabling multicast filter\n");
488
489 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
490 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
491 if (mclist->action == MCLIST_REM) {
492 /* detach this address and delete from list */
493 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
494 mc_list[5] = priv->port;
495 err = mlx4_multicast_detach(mdev->dev,
496 &priv->rss_map.indir_qp,
497 mc_list,
498 MLX4_PROT_ETH,
499 mclist->reg_id);
500 if (err)
501 en_err(priv, "Fail to detach multicast address\n");
502
503 /* remove from list */
504 list_del(&mclist->list);
505 kfree(mclist);
506 } else if (mclist->action == MCLIST_ADD) {
507 /* attach the address */
508 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
509 /* needed for B0 steering support */
510 mc_list[5] = priv->port;
511 err = mlx4_multicast_attach(mdev->dev,
512 &priv->rss_map.indir_qp,
513 mc_list,
514 priv->port, 0,
515 MLX4_PROT_ETH,
516 &mclist->reg_id);
517 if (err)
518 en_err(priv, "Fail to attach multicast address\n");
519
520 }
521 }
522 }
523 out:
524 mutex_unlock(&mdev->state_lock);
525 }
526
527 #ifdef CONFIG_NET_POLL_CONTROLLER
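/* Poll RX completions with a zero budget, e.g. for netconsole. Each
 * RX CQ is drained directly under its lock, after napi_synchronize()
 * ensures the NAPI handler is not running concurrently.
 */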
528 static void mlx4_en_netpoll(struct net_device *dev)
529 {
530 struct mlx4_en_priv *priv = netdev_priv(dev);
531 struct mlx4_en_cq *cq;
532 unsigned long flags;
533 int i;
534
535 for (i = 0; i < priv->rx_ring_num; i++) {
536 cq = &priv->rx_cq[i];
537 spin_lock_irqsave(&cq->lock, flags);
538 napi_synchronize(&cq->napi);
539 mlx4_en_process_rx_cq(dev, cq, 0);
540 spin_unlock_irqrestore(&cq->lock, flags);
541 }
542 }
543 #endif
544
545 static void mlx4_en_tx_timeout(struct net_device *dev)
546 {
547 struct mlx4_en_priv *priv = netdev_priv(dev);
548 struct mlx4_en_dev *mdev = priv->mdev;
549
550 if (netif_msg_timer(priv))
551 en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
552
553 priv->port_stats.tx_timeout++;
554 en_dbg(DRV, priv, "Scheduling watchdog\n");
555 queue_work(mdev->workqueue, &priv->watchdog_task);
556 }
557
558
559 static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
560 {
561 struct mlx4_en_priv *priv = netdev_priv(dev);
562
563 spin_lock_bh(&priv->stats_lock);
564 memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
565 spin_unlock_bh(&priv->stats_lock);
566
567 return &priv->ret_stats;
568 }
569
570 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
571 {
572 struct mlx4_en_cq *cq;
573 int i;
574
575 /* If we haven't received a specific coalescing setting
576 * (module param), we set the moderation parameters as follows:
577 * - moder_cnt is set to the number of MTU-sized packets needed to
578 * satisfy our coalescing target.
579 * - moder_time is set to a fixed value.
580 */
581 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
582 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
583 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
584 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
585 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
586 "rx_frames:%d rx_usecs:%d\n",
587 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
588
589 /* Setup cq moderation params */
590 for (i = 0; i < priv->rx_ring_num; i++) {
591 cq = &priv->rx_cq[i];
592 cq->moder_cnt = priv->rx_frames;
593 cq->moder_time = priv->rx_usecs;
594 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
595 priv->last_moder_packets[i] = 0;
596 priv->last_moder_bytes[i] = 0;
597 }
598
599 for (i = 0; i < priv->tx_ring_num; i++) {
600 cq = &priv->tx_cq[i];
601 cq->moder_cnt = priv->tx_frames;
602 cq->moder_time = priv->tx_usecs;
603 }
604
605 /* Reset auto-moderation params */
606 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
607 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
608 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
609 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
610 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
611 priv->adaptive_rx_coal = 1;
612 priv->last_moder_jiffies = 0;
613 priv->last_moder_tx_packets = 0;
614 }
615
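/* Adaptive RX coalescing: once per sample interval, estimate each
 * ring's packet rate and map it onto a moderation time, linearly
 * interpolated between the configured bounds:
 *
 *   rate <= pkt_rate_low   ->  rx_usecs_low
 *   rate >= pkt_rate_high  ->  rx_usecs_high
 *   otherwise:
 *     moder_time = rx_usecs_low +
 *                  (rate - pkt_rate_low) *
 *                  (rx_usecs_high - rx_usecs_low) /
 *                  (pkt_rate_high - pkt_rate_low)
 */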
616 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
617 {
618 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
619 struct mlx4_en_cq *cq;
620 unsigned long packets;
621 unsigned long rate;
622 unsigned long avg_pkt_size;
623 unsigned long rx_packets;
624 unsigned long rx_bytes;
625 unsigned long rx_pkt_diff;
626 int moder_time;
627 int ring, err;
628
629 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
630 return;
631
632 for (ring = 0; ring < priv->rx_ring_num; ring++) {
633 spin_lock_bh(&priv->stats_lock);
634 rx_packets = priv->rx_ring[ring].packets;
635 rx_bytes = priv->rx_ring[ring].bytes;
636 spin_unlock_bh(&priv->stats_lock);
637
638 rx_pkt_diff = ((unsigned long) (rx_packets -
639 priv->last_moder_packets[ring]));
640 packets = rx_pkt_diff;
641 rate = packets * HZ / period;
642 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
643 priv->last_moder_bytes[ring])) / packets : 0;
644
645 /* Apply auto-moderation only when the packet rate is
646 * high enough for it to matter */
647 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
648 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
649 if (rate < priv->pkt_rate_low)
650 moder_time = priv->rx_usecs_low;
651 else if (rate > priv->pkt_rate_high)
652 moder_time = priv->rx_usecs_high;
653 else
654 moder_time = (rate - priv->pkt_rate_low) *
655 (priv->rx_usecs_high - priv->rx_usecs_low) /
656 (priv->pkt_rate_high - priv->pkt_rate_low) +
657 priv->rx_usecs_low;
658 } else {
659 moder_time = priv->rx_usecs_low;
660 }
661
662 if (moder_time != priv->last_moder_time[ring]) {
663 priv->last_moder_time[ring] = moder_time;
664 cq = &priv->rx_cq[ring];
665 cq->moder_time = moder_time;
666 err = mlx4_en_set_cq_moder(priv, cq);
667 if (err)
668 en_err(priv, "Failed modifying moderation "
669 "for cq:%d\n", ring);
670 }
671 priv->last_moder_packets[ring] = rx_packets;
672 priv->last_moder_bytes[ring] = rx_bytes;
673 }
674
675 priv->last_moder_jiffies = jiffies;
676 }
677
678 static void mlx4_en_do_get_stats(struct work_struct *work)
679 {
680 struct delayed_work *delay = to_delayed_work(work);
681 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
682 stats_task);
683 struct mlx4_en_dev *mdev = priv->mdev;
684 int err;
685
686 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
687 if (err)
688 en_dbg(HW, priv, "Could not update stats\n");
689
690 mutex_lock(&mdev->state_lock);
691 if (mdev->device_up) {
692 if (priv->port_up)
693 mlx4_en_auto_moderation(priv);
694
695 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
696 }
697 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
698 queue_work(mdev->workqueue, &priv->mac_task);
699 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
700 }
701 mutex_unlock(&mdev->state_lock);
702 }
703
704 static void mlx4_en_linkstate(struct work_struct *work)
705 {
706 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
707 linkstate_task);
708 struct mlx4_en_dev *mdev = priv->mdev;
709 int linkstate = priv->link_state;
710
711 mutex_lock(&mdev->state_lock);
712 /* If the observable port state changed, set the carrier state
713 * and report it to the system log */
714 if (priv->last_link_state != linkstate) {
715 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
716 en_info(priv, "Link Down\n");
717 netif_carrier_off(priv->dev);
718 } else {
719 en_info(priv, "Link Up\n");
720 netif_carrier_on(priv->dev);
721 }
722 }
723 priv->last_link_state = linkstate;
724 mutex_unlock(&mdev->state_lock);
725 }
726
727
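/* Bring-up order matters here: RX rings and CQs are activated first,
 * then the base QPN is obtained and RSS steering plus the drop QP are
 * set up, then TX CQs and rings, and only after SET_PORT/INIT_PORT
 * succeed is the RSS QP attached to the broadcast address and the
 * multicast task scheduled. The error labels unwind in reverse order.
 */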
728 int mlx4_en_start_port(struct net_device *dev)
729 {
730 struct mlx4_en_priv *priv = netdev_priv(dev);
731 struct mlx4_en_dev *mdev = priv->mdev;
732 struct mlx4_en_cq *cq;
733 struct mlx4_en_tx_ring *tx_ring;
734 int rx_index = 0;
735 int tx_index = 0;
736 int err = 0;
737 int i;
738 int j;
739 u8 mc_list[16] = {0};
740
741 if (priv->port_up) {
742 en_dbg(DRV, priv, "start port called while port already up\n");
743 return 0;
744 }
745
746 INIT_LIST_HEAD(&priv->mc_list);
747 INIT_LIST_HEAD(&priv->curr_list);
748
749 /* Calculate Rx buf size */
750 dev->mtu = min(dev->mtu, priv->max_mtu);
751 mlx4_en_calc_rx_buf(dev);
752 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
753
754 /* Configure rx cq's and rings */
755 err = mlx4_en_activate_rx_rings(priv);
756 if (err) {
757 en_err(priv, "Failed to activate RX rings\n");
758 return err;
759 }
760 for (i = 0; i < priv->rx_ring_num; i++) {
761 cq = &priv->rx_cq[i];
762
763 err = mlx4_en_activate_cq(priv, cq, i);
764 if (err) {
765 en_err(priv, "Failed activating Rx CQ\n");
766 goto cq_err;
767 }
768 for (j = 0; j < cq->size; j++)
769 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
770 err = mlx4_en_set_cq_moder(priv, cq);
771 if (err) {
772 en_err(priv, "Failed setting cq moderation parameters");
773 mlx4_en_deactivate_cq(priv, cq);
774 goto cq_err;
775 }
776 mlx4_en_arm_cq(priv, cq);
777 priv->rx_ring[i].cqn = cq->mcq.cqn;
778 ++rx_index;
779 }
780
781 /* Set qp number */
782 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
783 err = mlx4_get_eth_qp(mdev->dev, priv->port,
784 priv->mac, &priv->base_qpn);
785 if (err) {
786 en_err(priv, "Failed getting eth qp\n");
787 goto cq_err;
788 }
789 mdev->mac_removed[priv->port] = 0;
790
791 err = mlx4_en_config_rss_steer(priv);
792 if (err) {
793 en_err(priv, "Failed configuring rss steering\n");
794 goto mac_err;
795 }
796
797 err = mlx4_en_create_drop_qp(priv);
798 if (err)
799 goto rss_err;
800
801 /* Configure tx cq's and rings */
802 for (i = 0; i < priv->tx_ring_num; i++) {
803 /* Configure cq */
804 cq = &priv->tx_cq[i];
805 err = mlx4_en_activate_cq(priv, cq, i);
806 if (err) {
807 en_err(priv, "Failed allocating Tx CQ\n");
808 goto tx_err;
809 }
810 err = mlx4_en_set_cq_moder(priv, cq);
811 if (err) {
812 en_err(priv, "Failed setting cq moderation parameters");
813 mlx4_en_deactivate_cq(priv, cq);
814 goto tx_err;
815 }
816 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
817 cq->buf->wqe_index = cpu_to_be16(0xffff);
818
819 /* Configure ring */
820 tx_ring = &priv->tx_ring[i];
821 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
822 i / priv->mdev->profile.num_tx_rings_p_up);
823 if (err) {
824 en_err(priv, "Failed allocating Tx ring\n");
825 mlx4_en_deactivate_cq(priv, cq);
826 goto tx_err;
827 }
828 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
829
830 /* Arm CQ for TX completions */
831 mlx4_en_arm_cq(priv, cq);
832
833 /* Set initial ownership of all Tx TXBBs to SW (1) */
834 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
835 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
836 ++tx_index;
837 }
838
839 /* Configure port */
840 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
841 priv->rx_skb_size + ETH_FCS_LEN,
842 priv->prof->tx_pause,
843 priv->prof->tx_ppp,
844 priv->prof->rx_pause,
845 priv->prof->rx_ppp);
846 if (err) {
847 en_err(priv, "Failed setting port general configurations "
848 "for port %d, with error %d\n", priv->port, err);
849 goto tx_err;
850 }
851 /* Set default qp number */
852 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
853 if (err) {
854 en_err(priv, "Failed setting default qp numbers\n");
855 goto tx_err;
856 }
857
858 /* Init port */
859 en_dbg(HW, priv, "Initializing port\n");
860 err = mlx4_INIT_PORT(mdev->dev, priv->port);
861 if (err) {
862 en_err(priv, "Failed Initializing port\n");
863 goto tx_err;
864 }
865
866 /* Attach rx QP to broadcast address */
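/* The 16-byte mc_list buffer is laid out like a multicast GID: the
 * Ethernet address occupies bytes 10..15, and byte 5 carries the port
 * number needed for B0 steering.
 */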
867 memset(&mc_list[10], 0xff, ETH_ALEN);
868 mc_list[5] = priv->port; /* needed for B0 steering support */
869 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
870 priv->port, 0, MLX4_PROT_ETH,
871 &priv->broadcast_id))
872 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
873
874 /* Must redo promiscuous mode setup. */
875 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
876 if (mdev->dev->caps.steering_mode ==
877 MLX4_STEERING_MODE_DEVICE_MANAGED) {
878 mlx4_flow_steer_promisc_remove(mdev->dev,
879 priv->port,
880 MLX4_FS_PROMISC_UPLINK);
881 mlx4_flow_steer_promisc_remove(mdev->dev,
882 priv->port,
883 MLX4_FS_PROMISC_ALL_MULTI);
884 }
885
886 /* Schedule multicast task to populate multicast list */
887 queue_work(mdev->workqueue, &priv->mcast_task);
888
889 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
890
891 priv->port_up = true;
892 netif_tx_start_all_queues(dev);
893 return 0;
894
895 tx_err:
896 while (tx_index--) {
897 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
898 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
899 }
900 mlx4_en_destroy_drop_qp(priv);
901 rss_err:
902 mlx4_en_release_rss_steer(priv);
903 mac_err:
904 mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
905 cq_err:
906 while (rx_index--)
907 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
908 for (i = 0; i < priv->rx_ring_num; i++)
909 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
910
911 return err; /* need to close devices */
912 }
913
914
915 void mlx4_en_stop_port(struct net_device *dev)
916 {
917 struct mlx4_en_priv *priv = netdev_priv(dev);
918 struct mlx4_en_dev *mdev = priv->mdev;
919 struct mlx4_en_mc_list *mclist, *tmp;
920 int i;
921 u8 mc_list[16] = {0};
922
923 if (!priv->port_up) {
924 en_dbg(DRV, priv, "stop port called while port already down\n");
925 return;
926 }
927
928 /* Synchronize with tx routine */
929 netif_tx_lock_bh(dev);
930 netif_tx_stop_all_queues(dev);
931 netif_tx_unlock_bh(dev);
932
933 /* Set port as not active */
934 priv->port_up = false;
935
936 /* Detach all multicasts */
937 memset(&mc_list[10], 0xff, ETH_ALEN);
938 mc_list[5] = priv->port; /* needed for B0 steering support */
939 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
940 MLX4_PROT_ETH, priv->broadcast_id);
941 list_for_each_entry(mclist, &priv->curr_list, list) {
942 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
943 mc_list[5] = priv->port;
944 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
945 mc_list, MLX4_PROT_ETH, mclist->reg_id);
946 }
947 mlx4_en_clear_list(dev);
948 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
949 list_del(&mclist->list);
950 kfree(mclist);
951 }
952
953 /* Flush multicast filter */
954 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
955
956 mlx4_en_destroy_drop_qp(priv);
957
958 /* Free TX Rings */
959 for (i = 0; i < priv->tx_ring_num; i++) {
960 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
961 mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
962 }
963 msleep(10);
964
965 for (i = 0; i < priv->tx_ring_num; i++)
966 mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
967
968 /* Free RSS qps */
969 mlx4_en_release_rss_steer(priv);
970
971 /* Unregister MAC address for the port */
972 mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
973 mdev->mac_removed[priv->port] = 1;
974
975 /* Free RX Rings */
976 for (i = 0; i < priv->rx_ring_num; i++) {
977 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
978 while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
979 msleep(1);
980 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
981 }
982
983 /* close port*/
984 mlx4_CLOSE_PORT(mdev->dev, priv->port);
985 }
986
987 static void mlx4_en_restart(struct work_struct *work)
988 {
989 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
990 watchdog_task);
991 struct mlx4_en_dev *mdev = priv->mdev;
992 struct net_device *dev = priv->dev;
993 int i;
994
995 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
996
997 mutex_lock(&mdev->state_lock);
998 if (priv->port_up) {
999 mlx4_en_stop_port(dev);
1000 for (i = 0; i < priv->tx_ring_num; i++)
1001 netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
1002 if (mlx4_en_start_port(dev))
1003 en_err(priv, "Failed restarting port %d\n", priv->port);
1004 }
1005 mutex_unlock(&mdev->state_lock);
1006 }
1007
1008 static void mlx4_en_clear_stats(struct net_device *dev)
1009 {
1010 struct mlx4_en_priv *priv = netdev_priv(dev);
1011 struct mlx4_en_dev *mdev = priv->mdev;
1012 int i;
1013
1014 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1015 en_dbg(HW, priv, "Failed dumping statistics\n");
1016
1017 memset(&priv->stats, 0, sizeof(priv->stats));
1018 memset(&priv->pstats, 0, sizeof(priv->pstats));
1019 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1020 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1021
1022 for (i = 0; i < priv->tx_ring_num; i++) {
1023 priv->tx_ring[i].bytes = 0;
1024 priv->tx_ring[i].packets = 0;
1025 priv->tx_ring[i].tx_csum = 0;
1026 }
1027 for (i = 0; i < priv->rx_ring_num; i++) {
1028 priv->rx_ring[i].bytes = 0;
1029 priv->rx_ring[i].packets = 0;
1030 priv->rx_ring[i].csum_ok = 0;
1031 priv->rx_ring[i].csum_none = 0;
1032 }
1033 }
1034
1035 static int mlx4_en_open(struct net_device *dev)
1036 {
1037 struct mlx4_en_priv *priv = netdev_priv(dev);
1038 struct mlx4_en_dev *mdev = priv->mdev;
1039 int err = 0;
1040
1041 mutex_lock(&mdev->state_lock);
1042
1043 if (!mdev->device_up) {
1044 en_err(priv, "Cannot open - device down/disabled\n");
1045 err = -EBUSY;
1046 goto out;
1047 }
1048
1049 /* Reset HW statistics and SW counters */
1050 mlx4_en_clear_stats(dev);
1051
1052 err = mlx4_en_start_port(dev);
1053 if (err)
1054 en_err(priv, "Failed starting port:%d\n", priv->port);
1055
1056 out:
1057 mutex_unlock(&mdev->state_lock);
1058 return err;
1059 }
1060
1061
1062 static int mlx4_en_close(struct net_device *dev)
1063 {
1064 struct mlx4_en_priv *priv = netdev_priv(dev);
1065 struct mlx4_en_dev *mdev = priv->mdev;
1066
1067 en_dbg(IFDOWN, priv, "Close port called\n");
1068
1069 mutex_lock(&mdev->state_lock);
1070
1071 mlx4_en_stop_port(dev);
1072 netif_carrier_off(dev);
1073
1074 mutex_unlock(&mdev->state_lock);
1075 return 0;
1076 }
1077
1078 void mlx4_en_free_resources(struct mlx4_en_priv *priv)
1079 {
1080 int i;
1081
1082 for (i = 0; i < priv->tx_ring_num; i++) {
1083 if (priv->tx_ring[i].tx_info)
1084 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
1085 if (priv->tx_cq[i].buf)
1086 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
1087 }
1088
1089 for (i = 0; i < priv->rx_ring_num; i++) {
1090 if (priv->rx_ring[i].rx_info)
1091 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1092 priv->prof->rx_ring_size, priv->stride);
1093 if (priv->rx_cq[i].buf)
1094 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
1095 }
1096
1097 if (priv->base_tx_qpn) {
1098 mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
1099 priv->base_tx_qpn = 0;
1100 }
1101 }
1102
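/* TX QPs come from one contiguous range reserved up front (with an
 * alignment of 256, per the call below) so that ring i can simply use
 * base_tx_qpn + i; mlx4_en_free_resources() releases the same range.
 */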
1103 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1104 {
1105 struct mlx4_en_port_profile *prof = priv->prof;
1106 int i;
1107 int err;
1108
1109 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
1110 if (err) {
1111 en_err(priv, "failed reserving range for TX rings\n");
1112 return err;
1113 }
1114
1115 /* Create tx Rings */
1116 for (i = 0; i < priv->tx_ring_num; i++) {
1117 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
1118 prof->tx_ring_size, i, TX))
1119 goto err;
1120
1121 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
1122 prof->tx_ring_size, TXBB_SIZE))
1123 goto err;
1124 }
1125
1126 /* Create rx Rings */
1127 for (i = 0; i < priv->rx_ring_num; i++) {
1128 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
1129 prof->rx_ring_size, i, RX))
1130 goto err;
1131
1132 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
1133 prof->rx_ring_size, priv->stride))
1134 goto err;
1135 }
1136
1137 return 0;
1138
1139 err:
1140 en_err(priv, "Failed to allocate NIC resources\n");
1141 return -ENOMEM;
1142 }
1143
1144
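/* Teardown mirrors mlx4_en_init_netdev(): unregister the netdev first
 * (closing the port if it is up), flush pending work, detach the
 * netdev from mdev->pndev so workers cannot reach it, then free the
 * rings and CQs.
 */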
1145 void mlx4_en_destroy_netdev(struct net_device *dev)
1146 {
1147 struct mlx4_en_priv *priv = netdev_priv(dev);
1148 struct mlx4_en_dev *mdev = priv->mdev;
1149
1150 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
1151
1152 /* Unregister device - this will close the port if it was up */
1153 if (priv->registered)
1154 unregister_netdev(dev);
1155
1156 if (priv->allocated)
1157 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
1158
1159 cancel_delayed_work(&priv->stats_task);
1160 /* flush any pending task for this netdev */
1161 flush_workqueue(mdev->workqueue);
1162
1163 /* Detach the netdev so tasks would not attempt to access it */
1164 mutex_lock(&mdev->state_lock);
1165 mdev->pndev[priv->port] = NULL;
1166 mutex_unlock(&mdev->state_lock);
1167
1168 mlx4_en_free_resources(priv);
1169
1170 kfree(priv->tx_ring);
1171 kfree(priv->tx_cq);
1172
1173 free_netdev(dev);
1174 }
1175
1176 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
1177 {
1178 struct mlx4_en_priv *priv = netdev_priv(dev);
1179 struct mlx4_en_dev *mdev = priv->mdev;
1180 int err = 0;
1181
1182 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
1183 dev->mtu, new_mtu);
1184
1185 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
1186 en_err(priv, "Bad MTU size:%d.\n", new_mtu);
1187 return -EPERM;
1188 }
1189 dev->mtu = new_mtu;
1190
1191 if (netif_running(dev)) {
1192 mutex_lock(&mdev->state_lock);
1193 if (!mdev->device_up) {
1194 /* NIC is probably restarting - let watchdog task reset
1195 * the port */
1196 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
1197 } else {
1198 mlx4_en_stop_port(dev);
1199 err = mlx4_en_start_port(dev);
1200 if (err) {
1201 en_err(priv, "Failed restarting port:%d\n",
1202 priv->port);
1203 queue_work(mdev->workqueue, &priv->watchdog_task);
1204 }
1205 }
1206 mutex_unlock(&mdev->state_lock);
1207 }
1208 return 0;
1209 }
1210
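/* Presumably (not verified here), toggling NETIF_F_LOOPBACK only
 * flips MLX4_WQE_CTRL_FORCE_LOOPBACK in the control flags applied to
 * future TX descriptors, so the change takes effect without a port
 * restart.
 */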
1211 static int mlx4_en_set_features(struct net_device *netdev,
1212 netdev_features_t features)
1213 {
1214 struct mlx4_en_priv *priv = netdev_priv(netdev);
1215
1216 if (features & NETIF_F_LOOPBACK)
1217 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
1218 else
1219 priv->ctrl_flags &=
1220 cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
1221
1222 return 0;
1223
1224 }
1225
1226 static const struct net_device_ops mlx4_netdev_ops = {
1227 .ndo_open = mlx4_en_open,
1228 .ndo_stop = mlx4_en_close,
1229 .ndo_start_xmit = mlx4_en_xmit,
1230 .ndo_select_queue = mlx4_en_select_queue,
1231 .ndo_get_stats = mlx4_en_get_stats,
1232 .ndo_set_rx_mode = mlx4_en_set_multicast,
1233 .ndo_set_mac_address = mlx4_en_set_mac,
1234 .ndo_validate_addr = eth_validate_addr,
1235 .ndo_change_mtu = mlx4_en_change_mtu,
1236 .ndo_tx_timeout = mlx4_en_tx_timeout,
1237 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
1238 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
1239 #ifdef CONFIG_NET_POLL_CONTROLLER
1240 .ndo_poll_controller = mlx4_en_netpoll,
1241 #endif
1242 .ndo_set_features = mlx4_en_set_features,
1243 .ndo_setup_tc = mlx4_en_setup_tc,
1244 };
1245
1246 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1247 struct mlx4_en_port_profile *prof)
1248 {
1249 struct net_device *dev;
1250 struct mlx4_en_priv *priv;
1251 int i;
1252 int err;
1253
1254 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
1255 prof->tx_ring_num, prof->rx_ring_num);
1256 if (dev == NULL)
1257 return -ENOMEM;
1258
1259 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
1260 dev->dev_id = port - 1;
1261
1262 /*
1263 * Initialize driver private data
1264 */
1265
1266 priv = netdev_priv(dev);
1267 memset(priv, 0, sizeof(struct mlx4_en_priv));
1268 priv->dev = dev;
1269 priv->mdev = mdev;
1270 priv->ddev = &mdev->pdev->dev;
1271 priv->prof = prof;
1272 priv->port = port;
1273 priv->port_up = false;
1274 priv->flags = prof->flags;
1275 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
1276 MLX4_WQE_CTRL_SOLICITED);
1277 priv->tx_ring_num = prof->tx_ring_num;
1278 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) *
1279 priv->tx_ring_num, GFP_KERNEL);
1280 if (!priv->tx_ring) {
1281 err = -ENOMEM;
1282 goto out;
1283 }
1284 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num,
1285 GFP_KERNEL);
1286 if (!priv->tx_cq) {
1287 err = -ENOMEM;
1288 goto out;
1289 }
1290 priv->rx_ring_num = prof->rx_ring_num;
1291 priv->mac_index = -1;
1292 priv->msg_enable = MLX4_EN_MSG_LEVEL;
1293 spin_lock_init(&priv->stats_lock);
1294 INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
1295 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
1296 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
1297 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
1298 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
1299 #ifdef CONFIG_MLX4_EN_DCB
1300 if (!mlx4_is_slave(priv->mdev->dev))
1301 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
1302 #endif
1303
1304 /* Query for default mac and max mtu */
1305 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
1306 priv->mac = mdev->dev->caps.def_mac[priv->port];
1307 if (ILLEGAL_MAC(priv->mac)) {
1308 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
1309 priv->port, priv->mac);
1310 err = -EINVAL;
1311 goto out;
1312 }
1313
1314 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
1315 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
1316 err = mlx4_en_alloc_resources(priv);
1317 if (err)
1318 goto out;
1319
1320 /* Allocate page for receive rings */
1321 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
1322 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
1323 if (err) {
1324 en_err(priv, "Failed to allocate page for rx qps\n");
1325 goto out;
1326 }
1327 priv->allocated = 1;
1328
1329 /*
1330 * Initialize netdev entry points
1331 */
1332 dev->netdev_ops = &mlx4_netdev_ops;
1333 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1334 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1335 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1336
1337 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1338
1339 /* Set default MAC */
1340 dev->addr_len = ETH_ALEN;
1341 for (i = 0; i < ETH_ALEN; i++) {
1342 dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
1343 dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
1344 }
1345
1346 /*
1347 * Set driver features
1348 */
1349 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1350 if (mdev->LSO_support)
1351 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1352
1353 dev->vlan_features = dev->hw_features;
1354
1355 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
1356 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
1357 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1358 NETIF_F_HW_VLAN_FILTER;
1359 dev->hw_features |= NETIF_F_LOOPBACK;
1360
1361 mdev->pndev[port] = dev;
1362
1363 netif_carrier_off(dev);
1364 err = register_netdev(dev);
1365 if (err) {
1366 en_err(priv, "Netdev registration failed for port %d\n", port);
1367 goto out;
1368 }
1369 priv->registered = 1;
1370
1371 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
1372 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
1373
1374 /* Configure port */
1375 mlx4_en_calc_rx_buf(dev);
1376 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1377 priv->rx_skb_size + ETH_FCS_LEN,
1378 prof->tx_pause, prof->tx_ppp,
1379 prof->rx_pause, prof->rx_ppp);
1380 if (err) {
1381 en_err(priv, "Failed setting port general configurations "
1382 "for port %d, with error %d\n", priv->port, err);
1383 goto out;
1384 }
1385
1386 /* Init port */
1387 en_warn(priv, "Initializing port\n");
1388 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1389 if (err) {
1390 en_err(priv, "Failed Initializing port\n");
1391 goto out;
1392 }
1393 mlx4_en_set_default_moderation(priv);
1394 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1395 return 0;
1396
1397 out:
1398 mlx4_en_destroy_netdev(dev);
1399 return err;
1400 }
1401