drivers/net: Convert compare_ether_addr to ether_addr_equal
drivers/net/ethernet/cisco/enic/enic_main.c

/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
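
/*
 * ENIC_DESC_MAX_SPLITS bounds how many descriptors the largest single
 * buffer of a TSO send can consume: a MAX_TSO (64KB) run is cut into
 * WQ_ENET_MAX_DESC_LEN-sized pieces, plus one descriptor for any
 * remainder.  For example, with 16KB descriptors this works out to
 * 64K/16K + 1 = 5.
 */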

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }

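/*
 * The vNIC stats blocks are arrays of 64-bit counters, so the byte
 * offset from offsetof() is divided by 8 to yield a u64 index;
 * enic_get_ethtool_stats() below reads counter i as
 * ((u64 *)&vstats->tx)[enic_tx_stats[i].offset].
 */
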
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_legacy_io_intr(void)
{
	return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
	return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
	return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count + 1;
}

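/*
 * Interrupt indexing: with MSI-X there is one vector per RQ
 * (0..rq_count-1), one per WQ (rq_count..rq_count+wq_count-1), then
 * the error vector and, last, the notify vector.  Legacy INTx instead
 * uses three fixed positions: 0 = I/O, 1 = errors, 2 = notify.
 */
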
static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	enic_dev_fw_info(enic, &fw_info);

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	enic_dev_stats_dump(enic, &vstats);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	unsigned int i, intr;

	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		intr = enic_legacy_io_intr();
		vnic_intr_coalescing_timer_set(&enic->intr[intr],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				tx_coalesce_usecs);
		}

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				rx_coalesce_usecs);
		}

		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

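/* The legacy PBA is a bitmask of asserted interrupt sources: bit i set
 * means interrupt i fired and needs servicing.
 */
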
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_msix_wq_intr(enic, 0);
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

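/* len_left is decremented before each fragment is queued, so
 * (len_left == 0) is true exactly when the current fragment is the
 * last one, and that descriptor is marked EOP.
 */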
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
				frag, 0, skb_frag_size(frag),
				DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment.  The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length.  So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment.  The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length.  So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
			len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
				offset, len,
				DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

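/* Top-level TX dispatch: TSO frames get per-segment descriptors with a
 * preloaded pseudo-header checksum, CHECKSUM_PARTIAL frames get an L4
 * checksum-offload descriptor, and everything else is queued as a
 * plain (optionally VLAN-tagged) send.
 */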
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

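	/* Stop the queue while there may not be room for another maximally
	 * fragmented skb; enic_wq_service() re-wakes it once completions
	 * free enough descriptors.
	 */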
	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);
	netdev->addr_assign_type &= ~NET_ADDR_RANDOM;

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the calls
	 * to add/del multicast addrs.  We keep the addrs from the
	 * last call in enic->mc_addr and look for changes to add/del:
	 * one pass deletes addrs that dropped out of the new list,
	 * a second pass adds addrs that are new to it.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the calls
	 * to add/del unicast addrs.  We keep the addrs from the
	 * last call in enic->uc_addr and look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

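	/* Overflowing the perfect-filter tables forces promiscuous or
	 * all-multicast mode above; the packet filter is pushed to the
	 * device only when the effective flags actually change.
	 */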
	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed.  Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed.  Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_wake_queue(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]);	/* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		netdev_err(netdev, "Unable to disable RQ.\n");
		rtnl_unlock();
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		rtnl_unlock();
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
				&enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
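	/* Decoded as ASCII, the four 10-byte key rows above read
	 * "UCSawesome", "PALOunique", "LINUXrocks" and "ENICiscool".
	 */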
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

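	/* Spread the 2^rss_hash_bits indirection-table entries round-robin
	 * across the receive queues.
	 */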
1927 for (i = 0; i < (1 << rss_hash_bits); i++)
1928 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
1929
1930 spin_lock(&enic->devcmd_lock);
1931 err = enic_set_rss_cpu(enic,
1932 rss_cpu_buf_pa,
1933 sizeof(union vnic_rss_cpu));
1934 spin_unlock(&enic->devcmd_lock);
1935
1936 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
1937 rss_cpu_buf_va, rss_cpu_buf_pa);
1938
1939 return err;
1940 }
1941
1942 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
1943 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
1944 {
1945 const u8 tso_ipid_split_en = 0;
1946 const u8 ig_vlan_strip_en = 1;
1947 int err;
1948
1949 /* Enable VLAN tag stripping.
1950 */
1951
1952 spin_lock(&enic->devcmd_lock);
1953 err = enic_set_nic_cfg(enic,
1954 rss_default_cpu, rss_hash_type,
1955 rss_hash_bits, rss_base_cpu,
1956 rss_enable, tso_ipid_split_en,
1957 ig_vlan_strip_en);
1958 spin_unlock(&enic->devcmd_lock);
1959
1960 return err;
1961 }
1962
1963 static int enic_set_rss_nic_cfg(struct enic *enic)
1964 {
1965 struct device *dev = enic_get_dev(enic);
1966 const u8 rss_default_cpu = 0;
1967 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
1968 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
1969 NIC_CFG_RSS_HASH_TYPE_IPV6 |
1970 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1971 const u8 rss_hash_bits = 7;
1972 const u8 rss_base_cpu = 0;
1973 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
1974
1975 if (rss_enable) {
1976 if (!enic_set_rsskey(enic)) {
1977 if (enic_set_rsscpu(enic, rss_hash_bits)) {
1978 rss_enable = 0;
1979 dev_warn(dev, "RSS disabled, "
1980 "Failed to set RSS cpu indirection table.");
1981 }
1982 } else {
1983 rss_enable = 0;
1984 dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
1985 }
1986 }
1987
1988 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
1989 rss_hash_bits, rss_base_cpu, rss_enable);
1990 }
1991
1992 static void enic_reset(struct work_struct *work)
1993 {
1994 struct enic *enic = container_of(work, struct enic, reset);
1995
1996 if (!netif_running(enic->netdev))
1997 return;
1998
1999 rtnl_lock();
2000
2001 enic_dev_hang_notify(enic);
2002 enic_stop(enic->netdev);
2003 enic_dev_hang_reset(enic);
2004 enic_reset_addr_lists(enic);
2005 enic_init_vnic_resources(enic);
2006 enic_set_rss_nic_cfg(enic);
2007 enic_dev_set_ig_vlan_rewrite_mode(enic);
2008 enic_open(enic->netdev);
2009
2010 rtnl_unlock();
2011 }
2012
2013 static int enic_set_intr_mode(struct enic *enic)
2014 {
2015 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2016 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
2017 unsigned int i;
2018
2019 /* Set interrupt mode (INTx, MSI, MSI-X) depending
2020 * on system capabilities.
2021 *
2022 * Try MSI-X first
2023 *
2024 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
2025 * (the second to last INTR is used for WQ/RQ errors)
2026 * (the last INTR is used for notifications)
2027 */
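/* For example, n = 4 RQs and m = 1 WQ means asking for 7 vectors: per
 * the layout above, the last two serve errors and notifications, and
 * the remaining five serve the RQ and WQ completion queues.
 */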
2028
2029 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
2030 for (i = 0; i < n + m + 2; i++)
2031 enic->msix_entry[i].entry = i;
2032
2033 /* Use multiple RQs if RSS is enabled
2034 */
2035
2036 if (ENIC_SETTING(enic, RSS) &&
2037 enic->config.intr_mode < 1 &&
2038 enic->rq_count >= n &&
2039 enic->wq_count >= m &&
2040 enic->cq_count >= n + m &&
2041 enic->intr_count >= n + m + 2) {
2042
2043 if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
2044
2045 enic->rq_count = n;
2046 enic->wq_count = m;
2047 enic->cq_count = n + m;
2048 enic->intr_count = n + m + 2;
2049
2050 vnic_dev_set_intr_mode(enic->vdev,
2051 VNIC_DEV_INTR_MODE_MSIX);
2052
2053 return 0;
2054 }
2055 }
2056
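/* If the full set of vectors isn't available, retry MSI-X with a single
 * RQ (forgoing RSS spreading): 1 RQ, m WQs, 1+m CQs and 1+m+2 INTRs.
 */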
2057 if (enic->config.intr_mode < 1 &&
2058 enic->rq_count >= 1 &&
2059 enic->wq_count >= m &&
2060 enic->cq_count >= 1 + m &&
2061 enic->intr_count >= 1 + m + 2) {
2062 if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
2063
2064 enic->rq_count = 1;
2065 enic->wq_count = m;
2066 enic->cq_count = 1 + m;
2067 enic->intr_count = 1 + m + 2;
2068
2069 vnic_dev_set_intr_mode(enic->vdev,
2070 VNIC_DEV_INTR_MODE_MSIX);
2071
2072 return 0;
2073 }
2074 }
2075
2076 /* Next try MSI
2077 *
2078 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
2079 */
2080
2081 if (enic->config.intr_mode < 2 &&
2082 enic->rq_count >= 1 &&
2083 enic->wq_count >= 1 &&
2084 enic->cq_count >= 2 &&
2085 enic->intr_count >= 1 &&
2086 !pci_enable_msi(enic->pdev)) {
2087
2088 enic->rq_count = 1;
2089 enic->wq_count = 1;
2090 enic->cq_count = 2;
2091 enic->intr_count = 1;
2092
2093 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
2094
2095 return 0;
2096 }
2097
2098 /* Next try INTx
2099 *
2100 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
2101 * (the first INTR is used for WQ/RQ)
2102 * (the second INTR is used for WQ/RQ errors)
2103 * (the last INTR is used for notifications)
2104 */
2105
2106 if (enic->config.intr_mode < 3 &&
2107 enic->rq_count >= 1 &&
2108 enic->wq_count >= 1 &&
2109 enic->cq_count >= 2 &&
2110 enic->intr_count >= 3) {
2111
2112 enic->rq_count = 1;
2113 enic->wq_count = 1;
2114 enic->cq_count = 2;
2115 enic->intr_count = 3;
2116
2117 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
2118
2119 return 0;
2120 }
2121
2122 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2123
2124 return -EINVAL;
2125 }
2126
2127 static void enic_clear_intr_mode(struct enic *enic)
2128 {
2129 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2130 case VNIC_DEV_INTR_MODE_MSIX:
2131 pci_disable_msix(enic->pdev);
2132 break;
2133 case VNIC_DEV_INTR_MODE_MSI:
2134 pci_disable_msi(enic->pdev);
2135 break;
2136 default:
2137 break;
2138 }
2139
2140 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
2141 }
2142
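/* Dynamic (port-profile) vnics use a separate ops table; the only
 * functional difference from enic_netdev_ops below is the MAC-change
 * hook, enic_set_mac_address_dynamic, used by port-profile aware
 * interfaces.
 */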
2143 static const struct net_device_ops enic_netdev_dynamic_ops = {
2144 .ndo_open = enic_open,
2145 .ndo_stop = enic_stop,
2146 .ndo_start_xmit = enic_hard_start_xmit,
2147 .ndo_get_stats64 = enic_get_stats,
2148 .ndo_validate_addr = eth_validate_addr,
2149 .ndo_set_rx_mode = enic_set_rx_mode,
2150 .ndo_set_mac_address = enic_set_mac_address_dynamic,
2151 .ndo_change_mtu = enic_change_mtu,
2152 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2153 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2154 .ndo_tx_timeout = enic_tx_timeout,
2155 .ndo_set_vf_port = enic_set_vf_port,
2156 .ndo_get_vf_port = enic_get_vf_port,
2157 .ndo_set_vf_mac = enic_set_vf_mac,
2158 #ifdef CONFIG_NET_POLL_CONTROLLER
2159 .ndo_poll_controller = enic_poll_controller,
2160 #endif
2161 };
2162
2163 static const struct net_device_ops enic_netdev_ops = {
2164 .ndo_open = enic_open,
2165 .ndo_stop = enic_stop,
2166 .ndo_start_xmit = enic_hard_start_xmit,
2167 .ndo_get_stats64 = enic_get_stats,
2168 .ndo_validate_addr = eth_validate_addr,
2169 .ndo_set_mac_address = enic_set_mac_address,
2170 .ndo_set_rx_mode = enic_set_rx_mode,
2171 .ndo_change_mtu = enic_change_mtu,
2172 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2173 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2174 .ndo_tx_timeout = enic_tx_timeout,
2175 .ndo_set_vf_port = enic_set_vf_port,
2176 .ndo_get_vf_port = enic_get_vf_port,
2177 .ndo_set_vf_mac = enic_set_vf_mac,
2178 #ifdef CONFIG_NET_POLL_CONTROLLER
2179 .ndo_poll_controller = enic_poll_controller,
2180 #endif
2181 };
2182
2183 static void enic_dev_deinit(struct enic *enic)
2184 {
2185 unsigned int i;
2186
2187 for (i = 0; i < enic->rq_count; i++)
2188 netif_napi_del(&enic->napi[i]);
2189
2190 enic_free_vnic_resources(enic);
2191 enic_clear_intr_mode(enic);
2192 }
2193
2194 static int enic_dev_init(struct enic *enic)
2195 {
2196 struct device *dev = enic_get_dev(enic);
2197 struct net_device *netdev = enic->netdev;
2198 unsigned int i;
2199 int err;
2200
2201 /* Get interrupt coalesce timer info */
2202 err = enic_dev_intr_coal_timer_info(enic);
2203 if (err) {
2204 dev_warn(dev, "Using default conversion factor for "
2205 "interrupt coalesce timer\n");
2206 vnic_dev_intr_coal_timer_info_default(enic->vdev);
2207 }
2208
2209 /* Get vNIC configuration
2210 */
2211
2212 err = enic_get_vnic_config(enic);
2213 if (err) {
2214 dev_err(dev, "Get vNIC configuration failed, aborting\n");
2215 return err;
2216 }
2217
2218 /* Get available resource counts
2219 */
2220
2221 enic_get_res_counts(enic);
2222
2223 /* Set interrupt mode based on resource counts and system
2224 * capabilities
2225 */
2226
2227 err = enic_set_intr_mode(enic);
2228 if (err) {
2229 dev_err(dev, "Failed to set intr mode based on resource "
2230 "counts and system capabilities, aborting\n");
2231 return err;
2232 }
2233
2234 /* Allocate and configure vNIC resources
2235 */
2236
2237 err = enic_alloc_vnic_resources(enic);
2238 if (err) {
2239 dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
2240 goto err_out_free_vnic_resources;
2241 }
2242
2243 enic_init_vnic_resources(enic);
2244
2245 err = enic_set_rss_nic_cfg(enic);
2246 if (err) {
2247 dev_err(dev, "Failed to configure NIC, aborting\n");
2248 goto err_out_free_vnic_resources;
2249 }
2250
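/* Register NAPI contexts: one per RQ under MSI-X, a single shared
 * context (napi[0]) for MSI and INTx.
 */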
2251 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2252 default:
2253 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
2254 break;
2255 case VNIC_DEV_INTR_MODE_MSIX:
2256 for (i = 0; i < enic->rq_count; i++)
2257 netif_napi_add(netdev, &enic->napi[i],
2258 enic_poll_msix, 64);
2259 break;
2260 }
2261
2262 return 0;
2263
2264 err_out_free_vnic_resources:
2265 enic_clear_intr_mode(enic);
2266 enic_free_vnic_resources(enic);
2267
2268 return err;
2269 }
2270
2271 static void enic_iounmap(struct enic *enic)
2272 {
2273 unsigned int i;
2274
2275 for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
2276 if (enic->bar[i].vaddr)
2277 iounmap(enic->bar[i].vaddr);
2278 }
2279
2280 static int __devinit enic_probe(struct pci_dev *pdev,
2281 const struct pci_device_id *ent)
2282 {
2283 struct device *dev = &pdev->dev;
2284 struct net_device *netdev;
2285 struct enic *enic;
2286 int using_dac = 0;
2287 unsigned int i;
2288 int err;
2289 #ifdef CONFIG_PCI_IOV
2290 int pos = 0;
2291 #endif
2292 int num_pps = 1;
2293
2294 /* Allocate net device structure and initialize. Private
2295 * instance data is initialized to zero.
2296 */
2297
2298 netdev = alloc_etherdev(sizeof(struct enic));
2299 if (!netdev)
2300 return -ENOMEM;
2301
2302 pci_set_drvdata(pdev, netdev);
2303
2304 SET_NETDEV_DEV(netdev, &pdev->dev);
2305
2306 enic = netdev_priv(netdev);
2307 enic->netdev = netdev;
2308 enic->pdev = pdev;
2309
2310 /* Setup PCI resources
2311 */
2312
2313 err = pci_enable_device_mem(pdev);
2314 if (err) {
2315 dev_err(dev, "Cannot enable PCI device, aborting\n");
2316 goto err_out_free_netdev;
2317 }
2318
2319 err = pci_request_regions(pdev, DRV_NAME);
2320 if (err) {
2321 dev_err(dev, "Cannot request PCI regions, aborting\n");
2322 goto err_out_disable_device;
2323 }
2324
2325 pci_set_master(pdev);
2326
2327 /* Query PCI controller on system for DMA addressing
2328 * limitation for the device. Try 40-bit first, and
2329 * fall back to 32-bit.
2330 */
2331
2332 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2333 if (err) {
2334 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2335 if (err) {
2336 dev_err(dev, "No usable DMA configuration, aborting\n");
2337 goto err_out_release_regions;
2338 }
2339 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2340 if (err) {
2341 dev_err(dev, "Unable to obtain %u-bit DMA "
2342 "for consistent allocations, aborting\n", 32);
2343 goto err_out_release_regions;
2344 }
2345 } else {
2346 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2347 if (err) {
2348 dev_err(dev, "Unable to obtain %u-bit DMA "
2349 "for consistent allocations, aborting\n", 40);
2350 goto err_out_release_regions;
2351 }
2352 using_dac = 1;
2353 }
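/* using_dac is remembered here and consulted further down to set
 * NETIF_F_HIGHDMA on the net device.
 */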
2354
2355 /* Map vNIC resources from BAR0-5
2356 */
2357
2358 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
2359 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
2360 continue;
2361 enic->bar[i].len = pci_resource_len(pdev, i);
2362 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
2363 if (!enic->bar[i].vaddr) {
2364 dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
2365 err = -ENODEV;
2366 goto err_out_iounmap;
2367 }
2368 enic->bar[i].bus_addr = pci_resource_start(pdev, i);
2369 }
2370
2371 /* Register vNIC device
2372 */
2373
2374 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
2375 ARRAY_SIZE(enic->bar));
2376 if (!enic->vdev) {
2377 dev_err(dev, "vNIC registration failed, aborting\n");
2378 err = -ENODEV;
2379 goto err_out_iounmap;
2380 }
2381
2382 #ifdef CONFIG_PCI_IOV
2383 /* Get number of subvnics */
2384 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
2385 if (pos) {
2386 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
2387 &enic->num_vfs);
2388 if (enic->num_vfs) {
2389 err = pci_enable_sriov(pdev, enic->num_vfs);
2390 if (err) {
2391 dev_err(dev, "SRIOV enable failed, aborting."
2392 " pci_enable_sriov() returned %d\n",
2393 err);
2394 goto err_out_vnic_unregister;
2395 }
2396 enic->priv_flags |= ENIC_SRIOV_ENABLED;
2397 num_pps = enic->num_vfs;
2398 }
2399 }
2400 #endif
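/* With SR-IOV enabled there is one port-profile slot per VF; otherwise a
 * single slot covers the device itself.
 */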
2401
2402 /* Allocate structure for port profiles */
2403 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
2404 if (!enic->pp) {
2405 err = -ENOMEM;
2406 goto err_out_disable_sriov_pp;
2407 }
2408
2409 /* Issue device open to get device in known state
2410 */
2411
2412 err = enic_dev_open(enic);
2413 if (err) {
2414 dev_err(dev, "vNIC dev open failed, aborting\n");
2415 goto err_out_disable_sriov;
2416 }
2417
2418 /* Setup devcmd lock
2419 */
2420
2421 spin_lock_init(&enic->devcmd_lock);
2422
2423 /*
2424 * Set ingress vlan rewrite mode before vnic initialization
2425 */
2426
2427 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2428 if (err) {
2429 dev_err(dev,
2430 "Failed to set ingress vlan rewrite mode, aborting.\n");
2431 goto err_out_dev_close;
2432 }
2433
2434 /* Issue device init to initialize the vnic-to-switch link.
2435 * We'll start with carrier off and wait for link UP
2436 * notification later to turn on carrier. We don't need
2437 * to wait here for the vnic-to-switch link initialization
2438 * to complete; link UP notification is the indication that
2439 * the process is complete.
2440 */
2441
2442 netif_carrier_off(netdev);
2443
2444 /* Do not call dev_init for a dynamic vnic.
2445 * For a dynamic vnic, init_prov_info will be
2446 * called later by an upper layer.
2447 */
2448
2449 if (!enic_is_dynamic(enic)) {
2450 err = vnic_dev_init(enic->vdev, 0);
2451 if (err) {
2452 dev_err(dev, "vNIC dev init failed, aborting\n");
2453 goto err_out_dev_close;
2454 }
2455 }
2456
2457 err = enic_dev_init(enic);
2458 if (err) {
2459 dev_err(dev, "Device initialization failed, aborting\n");
2460 goto err_out_dev_close;
2461 }
2462
2463 /* Setup notification timer, HW reset task, and wq locks
2464 */
2465
2466 init_timer(&enic->notify_timer);
2467 enic->notify_timer.function = enic_notify_timer;
2468 enic->notify_timer.data = (unsigned long)enic;
2469
2470 INIT_WORK(&enic->reset, enic_reset);
2471 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
2472
2473 for (i = 0; i < enic->wq_count; i++)
2474 spin_lock_init(&enic->wq_lock[i]);
2475
2476 /* Register net device
2477 */
2478
2479 enic->port_mtu = enic->config.mtu;
2480 (void)enic_change_mtu(netdev, enic->port_mtu);
2481
2482 err = enic_set_mac_addr(netdev, enic->mac_addr);
2483 if (err) {
2484 dev_err(dev, "Invalid MAC address, aborting\n");
2485 goto err_out_dev_deinit;
2486 }
2487
2488 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2489 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2490
2491 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2492 netdev->netdev_ops = &enic_netdev_dynamic_ops;
2493 else
2494 netdev->netdev_ops = &enic_netdev_ops;
2495
2496 netdev->watchdog_timeo = 2 * HZ;
2497 netdev->ethtool_ops = &enic_ethtool_ops;
2498
2499 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2500 if (ENIC_SETTING(enic, LOOP)) {
2501 netdev->features &= ~NETIF_F_HW_VLAN_TX;
2502 enic->loop_enable = 1;
2503 enic->loop_tag = enic->config.loop_tag;
2504 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2505 }
2506 if (ENIC_SETTING(enic, TXCSUM))
2507 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2508 if (ENIC_SETTING(enic, TSO))
2509 netdev->hw_features |= NETIF_F_TSO |
2510 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2511 if (ENIC_SETTING(enic, RXCSUM))
2512 netdev->hw_features |= NETIF_F_RXCSUM;
2513
2514 netdev->features |= netdev->hw_features;
2515
2516 if (using_dac)
2517 netdev->features |= NETIF_F_HIGHDMA;
2518
2519 netdev->priv_flags |= IFF_UNICAST_FLT;
2520
2521 err = register_netdev(netdev);
2522 if (err) {
2523 dev_err(dev, "Cannot register net device, aborting\n");
2524 goto err_out_dev_deinit;
2525 }
2526
2527 return 0;
2528
2529 err_out_dev_deinit:
2530 enic_dev_deinit(enic);
2531 err_out_dev_close:
2532 vnic_dev_close(enic->vdev);
2533 err_out_disable_sriov:
2534 kfree(enic->pp);
2535 err_out_disable_sriov_pp:
2536 #ifdef CONFIG_PCI_IOV
2537 if (enic_sriov_enabled(enic)) {
2538 pci_disable_sriov(pdev);
2539 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2540 }
2541 err_out_vnic_unregister:
2542 #endif
2543 vnic_dev_unregister(enic->vdev);
2544 err_out_iounmap:
2545 enic_iounmap(enic);
2546 err_out_release_regions:
2547 pci_release_regions(pdev);
2548 err_out_disable_device:
2549 pci_disable_device(pdev);
2550 err_out_free_netdev:
2551 pci_set_drvdata(pdev, NULL);
2552 free_netdev(netdev);
2553
2554 return err;
2555 }
2556
2557 static void __devexit enic_remove(struct pci_dev *pdev)
2558 {
2559 struct net_device *netdev = pci_get_drvdata(pdev);
2560
2561 if (netdev) {
2562 struct enic *enic = netdev_priv(netdev);
2563
2564 cancel_work_sync(&enic->reset);
2565 cancel_work_sync(&enic->change_mtu_work);
2566 unregister_netdev(netdev);
2567 enic_dev_deinit(enic);
2568 vnic_dev_close(enic->vdev);
2569 #ifdef CONFIG_PCI_IOV
2570 if (enic_sriov_enabled(enic)) {
2571 pci_disable_sriov(pdev);
2572 enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
2573 }
2574 #endif
2575 kfree(enic->pp);
2576 vnic_dev_unregister(enic->vdev);
2577 enic_iounmap(enic);
2578 pci_release_regions(pdev);
2579 pci_disable_device(pdev);
2580 pci_set_drvdata(pdev, NULL);
2581 free_netdev(netdev);
2582 }
2583 }
2584
2585 static struct pci_driver enic_driver = {
2586 .name = DRV_NAME,
2587 .id_table = enic_id_table,
2588 .probe = enic_probe,
2589 .remove = __devexit_p(enic_remove),
2590 };
2591
2592 static int __init enic_init_module(void)
2593 {
2594 pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
2595
2596 return pci_register_driver(&enic_driver);
2597 }
2598
2599 static void __exit enic_cleanup_module(void)
2600 {
2601 pci_unregister_driver(&enic_driver);
2602 }
2603
2604 module_init(enic_init_module);
2605 module_exit(enic_cleanup_module);