/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "enic_res.h"
#include "enic.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
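/* Worst-case number of WQ descriptors a single send can be split into:
 * a TSO send carries at most MAX_TSO (64 KB) of data and each posted
 * descriptor covers at most WQ_ENET_MAX_DESC_LEN bytes, so one send may
 * need up to MAX_TSO/WQ_ENET_MAX_DESC_LEN + 1 descriptors.
 */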
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET	0x0043	/* ethernet vnic */

/* Supported devices */
static struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
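
/* The vnic_tx_stats/vnic_rx_stats structs are arrays of u64 counters,
 * so offset is stored in u64 units (offsetof / 8); see
 * enic_get_ethtool_stats(), which indexes (u64 *)&vstats->tx directly.
 */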

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ecmd->speed = vnic_dev_port_speed(enic->vdev);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_fw_info(enic->vdev, &fw_info);
	spin_unlock(&enic->devcmd_lock);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &vstats);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}

static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, RXCSUM))
		return -EINVAL;

	enic->csum_rx_enabled = !!data;

	return 0;
}

static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TXCSUM))
		return -EINVAL;

	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}

static int enic_set_tso(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TSO))
		return -EINVAL;

	if (data)
		netdev->features |=
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	else
		netdev->features &=
			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

	return 0;
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	tx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->tx_coalesce_usecs);
	rx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->rx_coalesce_usecs);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
			INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
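
/* In INTx and MSI modes a single coalescing timer serves both WQ and
 * RQ, which is why enic_set_coalesce() rejects requests where the tx
 * and rx values differ; only MSI-X has per-queue timers.
 */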

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
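
/* The queue is woken only once MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS
 * descriptors are free again, i.e. once a worst-case skb is guaranteed
 * to fit; enic_hard_start_xmit() stops the queue at the same
 * watermark, so the two sides form a simple hysteresis.
 */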

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
		netif_carrier_off(enic->netdev);
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (mtu < enic->netdev->mtu)
			printk(KERN_WARNING PFX
				"%s: interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				enic->netdev->name, enic->netdev->mtu, mtu);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
			enic->netdev->name, enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
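/* pba is the pending-bits word read from the device in legacy INTx
 * mode; each vnic interrupt resource owns one bit, so ENIC_TEST_INTR
 * checks whether a given source (notify, error, WQ/RQ) asserted.
 */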

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0));	/* EOP? */
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left));
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left));	/* EOP? */
			frag_len_left -= len;
			offset += len;
		}
	}
}
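
/* Example of the split math above, assuming WQ_ENET_LEN_BITS is 14 so
 * that WQ_ENET_MAX_DESC_LEN is 16384 (check wq_enet_desc.h for the
 * actual value on a given build): a 40000-byte linear TSO area is
 * posted as descriptors of 16384 + 16384 + 7232 bytes, and only the
 * last one may carry the EOP flag.
 */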

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
}
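
/* Dispatch order above: a non-zero gso_size means the stack requested
 * TSO; otherwise CHECKSUM_PARTIAL selects the L4 checksum-offload
 * path; a plain send still goes through the vlan variant so the
 * tag-insert flag is applied uniformly.
 */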

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &stats);
	spin_unlock(&enic->devcmd_lock);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static void enic_reset_mcaddrs(struct enic *enic)
{
	enic->mc_count = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct dev_mc_list *list = netdev->mc_list;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		(netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
	unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int mc_count = netdev->mc_count;
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	spin_lock(&enic->devcmd_lock);

	if (enic->flags != flags) {
		enic->flags = flags;
		vnic_dev_packet_filter(enic->vdev, directed,
			multicast, broadcast, promisc, allmulti);
	}

	/* Is there an easier way? Trying to minimize the calls
	 * to add/del multicast addrs. We keep the addrs from the
	 * last call in enic->mc_addr and look for changes to add/del.
	 */

	for (i = 0; list && i < mc_count; i++) {
		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
		list = list->next;
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_del_multicast_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;

	spin_unlock(&enic->devcmd_lock);
}
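
/* The two nested loops above compute a set difference in both
 * directions: addresses present last time but missing now are deleted,
 * and addresses new to the list are added, so unchanged entries never
 * generate a firmware call.
 */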

/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}

/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);

	if (vnic_rq_posting_soon(rq)) {

		/* SW workaround for A0 HW erratum: if we're just about
		 * to write posted_index, insert a dummy desc
		 * of type resvd
		 */

		rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
		vnic_rq_post(rq, 0, 0, 0, 0);
	} else {
		return enic_rq_alloc_buf(rq);
	}

	return 0;
}

static int enic_set_rq_alloc_buf(struct enic *enic)
{
	enum vnic_dev_hw_version hw_ver;
	int err;

	err = vnic_dev_hw_version(enic->vdev, &hw_ver);
	if (err)
		return err;

	switch (hw_ver) {
	case VNIC_DEV_HW_VER_A1:
		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
		break;
	case VNIC_DEV_HW_VER_A2:
	case VNIC_DEV_HW_VER_UNKNOWN:
		enic->rq_alloc_buf = enic_rq_alloc_buf;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
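
/* enic->rq_alloc_buf is resolved once at init time: A1-era hardware
 * gets the variant with the dummy-descriptor erratum workaround, while
 * newer (or unknown) revisions get the plain allocator, so the
 * per-packet fill path pays no extra branch for the erratum.
 */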

static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}
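
/* LRO callback: the inet_lro manager calls this to locate the IP and
 * TCP headers of a candidate packet; returning -1 (non-TCP,
 * fragmented, or truncated headers) keeps the packet out of
 * aggregation.
 */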

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (enic->vlan_group && vlan_stripped) {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if ((netdev->features & NETIF_F_LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);

		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return rq_work_done;
}
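
/* Per NAPI convention, returning the full budget keeps the poller
 * scheduled; that is why a buffer allocation failure above forces
 * rq_work_done up to rq_work_to_do instead of completing NAPI with an
 * under-filled ring.
 */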

static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx-0", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx-0", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static int enic_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		return err;
	}

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	spin_lock(&enic->devcmd_lock);
	enic_add_station_addr(enic);
	spin_unlock(&enic->devcmd_lock);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	spin_lock(&enic->devcmd_lock);
	vnic_dev_enable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	spin_lock(&enic->devcmd_lock);
	vnic_dev_disable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	napi_disable(&enic->napi);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
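
/* Teardown order matters in enic_stop(): interrupts are masked and
 * synchronized before NAPI is disabled, and the queues are disabled
 * before their buffers are cleaned, so no completion can race with the
 * cleanup callbacks.
 */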

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		printk(KERN_WARNING PFX
			"%s: interface MTU (%d) set higher "
			"than port MTU (%d)\n",
			netdev->name, netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
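
/* enic_dev_wait() is a generic start/poll helper: it kicks off an
 * asynchronous devcmd via start(), then polls finished() every 100 ms
 * for up to 2 seconds. enic_dev_open() and enic_dev_soft_reset() below
 * are thin wrappers around it.
 */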

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC device open failed, err %d.\n", err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		printk(KERN_ERR PFX
			"vNIC soft reset failed, err %d.\n", err);

	return err;
}
68f71708 SF |
1627 | static int enic_set_niccfg(struct enic *enic) |
1628 | { | |
1629 | const u8 rss_default_cpu = 0; | |
1630 | const u8 rss_hash_type = 0; | |
1631 | const u8 rss_hash_bits = 0; | |
1632 | const u8 rss_base_cpu = 0; | |
1633 | const u8 rss_enable = 0; | |
1634 | const u8 tso_ipid_split_en = 0; | |
1635 | const u8 ig_vlan_strip_en = 1; | |
1636 | ||
1637 | /* Enable VLAN tag stripping. RSS not enabled (yet). | |
6ba9cdc0 | 1638 | */ |
68f71708 SF |
1639 | |
1640 | return enic_set_nic_cfg(enic, | |
1641 | rss_default_cpu, rss_hash_type, | |
1642 | rss_hash_bits, rss_base_cpu, | |
1643 | rss_enable, tso_ipid_split_en, | |
1644 | ig_vlan_strip_en); | |
1645 | } | |
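
/* enic_set_niccfg() passes rss_enable = 0, so the rss_* parameters
 * above are placeholders and the device is expected to deliver all
 * receive traffic to the first (and only) RQ; ig_vlan_strip_en is the
 * only setting that takes effect here.
 */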
1646 | ||
01f2e4ea SF |
1647 | static void enic_reset(struct work_struct *work) |
1648 | { | |
1649 | struct enic *enic = container_of(work, struct enic, reset); | |
1650 | ||
1651 | if (!netif_running(enic->netdev)) | |
1652 | return; | |
1653 | ||
1654 | rtnl_lock(); | |
1655 | ||
1656 | spin_lock(&enic->devcmd_lock); | |
1657 | vnic_dev_hang_notify(enic->vdev); | |
1658 | spin_unlock(&enic->devcmd_lock); | |
1659 | ||
1660 | enic_stop(enic->netdev); | |
1661 | enic_dev_soft_reset(enic); | |
68f71708 | 1662 | vnic_dev_init(enic->vdev, 0); |
01f2e4ea SF |
1663 | enic_reset_mcaddrs(enic); |
1664 | enic_init_vnic_resources(enic); | |
68f71708 | 1665 | enic_set_niccfg(enic); |
01f2e4ea SF |
1666 | enic_open(enic->netdev); |
1667 | ||
1668 | rtnl_unlock(); | |
1669 | } | |
1670 | ||
1671 | static int enic_set_intr_mode(struct enic *enic) | |
1672 | { | |
6ba9cdc0 SF |
1673 | unsigned int n = 1; |
1674 | unsigned int m = 1; | |
01f2e4ea SF |
1675 | unsigned int i; |
1676 | ||
1677 | /* Set interrupt mode (INTx, MSI, MSI-X) depending on
1678 | * system capabilities.
1679 | * | |
1680 | * Try MSI-X first | |
1681 | * | |
1682 | * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs | |
1683 | * (the second to last INTR is used for WQ/RQ errors) | |
1684 | * (the last INTR is used for notifications) | |
1685 | */ | |
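/* Worked example for the default n = m = 1: 1 RQ, 1 WQ, 2 CQs and
 * 4 MSI-X vectors: two for RQ/WQ completions, one for WQ/RQ errors,
 * and one for notifications, per the layout described above.
 */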
1686 | ||
1687 | BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); | |
1688 | for (i = 0; i < n + m + 2; i++) | |
1689 | enic->msix_entry[i].entry = i; | |
1690 | ||
1691 | if (enic->config.intr_mode < 1 && | |
1692 | enic->rq_count >= n && | |
1693 | enic->wq_count >= m && | |
1694 | enic->cq_count >= n + m && | |
1695 | enic->intr_count >= n + m + 2 && | |
1696 | !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { | |
1697 | ||
1698 | enic->rq_count = n; | |
1699 | enic->wq_count = m; | |
1700 | enic->cq_count = n + m; | |
1701 | enic->intr_count = n + m + 2; | |
1702 | ||
1703 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); | |
1704 | ||
1705 | return 0; | |
1706 | } | |
1707 | ||
1708 | /* Next try MSI | |
1709 | * | |
1710 | * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR | |
1711 | */ | |
1712 | ||
1713 | if (enic->config.intr_mode < 2 && | |
1714 | enic->rq_count >= 1 && | |
1715 | enic->wq_count >= 1 && | |
1716 | enic->cq_count >= 2 && | |
1717 | enic->intr_count >= 1 && | |
1718 | !pci_enable_msi(enic->pdev)) { | |
1719 | ||
1720 | enic->rq_count = 1; | |
1721 | enic->wq_count = 1; | |
1722 | enic->cq_count = 2; | |
1723 | enic->intr_count = 1; | |
1724 | ||
1725 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); | |
1726 | ||
1727 | return 0; | |
1728 | } | |
1729 | ||
1730 | /* Next try INTx | |
1731 | * | |
1732 | * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs | |
1733 | * (the first INTR is used for WQ/RQ) | |
1734 | * (the second INTR is used for WQ/RQ errors) | |
1735 | * (the last INTR is used for notifications) | |
1736 | */ | |
1737 | ||
1738 | if (enic->config.intr_mode < 3 && | |
1739 | enic->rq_count >= 1 && | |
1740 | enic->wq_count >= 1 && | |
1741 | enic->cq_count >= 2 && | |
1742 | enic->intr_count >= 3) { | |
1743 | ||
1744 | enic->rq_count = 1; | |
1745 | enic->wq_count = 1; | |
1746 | enic->cq_count = 2; | |
1747 | enic->intr_count = 3; | |
1748 | ||
1749 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); | |
1750 | ||
1751 | return 0; | |
1752 | } | |
1753 | ||
1754 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | |
1755 | ||
1756 | return -EINVAL; | |
1757 | } | |
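
/* Net effect of the ladder above: enic->config.intr_mode acts as a
 * floor on the fallback order (0 permits MSI-X, MSI or INTx; 1 skips
 * MSI-X; 2 skips MSI-X and MSI); any higher value leaves the mode
 * UNKNOWN and the function fails with -EINVAL.
 */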
1758 | ||
1759 | static void enic_clear_intr_mode(struct enic *enic) | |
1760 | { | |
1761 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1762 | case VNIC_DEV_INTR_MODE_MSIX: | |
1763 | pci_disable_msix(enic->pdev); | |
1764 | break; | |
1765 | case VNIC_DEV_INTR_MODE_MSI: | |
1766 | pci_disable_msi(enic->pdev); | |
1767 | break; | |
1768 | default: | |
1769 | break; | |
1770 | } | |
1771 | ||
1772 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | |
1773 | } | |
1774 | ||
afe29f7a SH |
1775 | static const struct net_device_ops enic_netdev_ops = { |
1776 | .ndo_open = enic_open, | |
1777 | .ndo_stop = enic_stop, | |
00829823 | 1778 | .ndo_start_xmit = enic_hard_start_xmit, |
afe29f7a SH |
1779 | .ndo_get_stats = enic_get_stats, |
1780 | .ndo_validate_addr = eth_validate_addr, | |
fe96aaa1 | 1781 | .ndo_set_mac_address = eth_mac_addr, |
afe29f7a SH |
1782 | .ndo_set_multicast_list = enic_set_multicast_list, |
1783 | .ndo_change_mtu = enic_change_mtu, | |
1784 | .ndo_vlan_rx_register = enic_vlan_rx_register, | |
1785 | .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, | |
1786 | .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, | |
1787 | .ndo_tx_timeout = enic_tx_timeout, | |
1788 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1789 | .ndo_poll_controller = enic_poll_controller, | |
1790 | #endif | |
1791 | }; | |
1792 | ||
6fdfa970 SF |
1793 | void enic_dev_deinit(struct enic *enic) |
1794 | { | |
1795 | netif_napi_del(&enic->napi); | |
1796 | enic_free_vnic_resources(enic); | |
1797 | enic_clear_intr_mode(enic); | |
1798 | } | |
1799 | ||
1800 | int enic_dev_init(struct enic *enic) | |
1801 | { | |
1802 | struct net_device *netdev = enic->netdev; | |
1803 | int err; | |
1804 | ||
1805 | /* Get vNIC configuration | |
1806 | */ | |
1807 | ||
1808 | err = enic_get_vnic_config(enic); | |
1809 | if (err) { | |
1810 | printk(KERN_ERR PFX | |
1811 | "Get vNIC configuration failed, aborting.\n"); | |
1812 | return err; | |
1813 | } | |
1814 | ||
1815 | /* Get available resource counts | |
1816 | */ | |
1817 | ||
1818 | enic_get_res_counts(enic); | |
1819 | ||
1820 | /* Set interrupt mode based on resource counts and system | |
1821 | * capabilities | |
1822 | */ | |
1823 | ||
1824 | err = enic_set_intr_mode(enic); | |
1825 | if (err) { | |
1826 | printk(KERN_ERR PFX | |
d87fd25d SF |
1827 | "Failed to set intr mode based on resource " |
1828 | "counts and system capabilities, aborting.\n"); | |
6fdfa970 SF |
1829 | return err; |
1830 | } | |
1831 | ||
1832 | /* Allocate and configure vNIC resources | |
1833 | */ | |
1834 | ||
1835 | err = enic_alloc_vnic_resources(enic); | |
1836 | if (err) { | |
1837 | printk(KERN_ERR PFX | |
1838 | "Failed to alloc vNIC resources, aborting.\n"); | |
1839 | goto err_out_free_vnic_resources; | |
1840 | } | |
1841 | ||
1842 | enic_init_vnic_resources(enic); | |
1843 | ||
1844 | err = enic_set_rq_alloc_buf(enic); | |
1845 | if (err) { | |
1846 | printk(KERN_ERR PFX | |
1847 | "Failed to set RQ buffer allocator, aborting.\n"); | |
1848 | goto err_out_free_vnic_resources; | |
1849 | } | |
1850 | ||
1851 | err = enic_set_niccfg(enic); | |
1852 | if (err) { | |
1853 | printk(KERN_ERR PFX | |
1854 | "Failed to config nic, aborting.\n"); | |
1855 | goto err_out_free_vnic_resources; | |
1856 | } | |
1857 | ||
1858 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1859 | default: | |
1860 | netif_napi_add(netdev, &enic->napi, enic_poll, 64); | |
1861 | break; | |
1862 | case VNIC_DEV_INTR_MODE_MSIX: | |
1863 | netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64); | |
1864 | break; | |
1865 | } | |
1866 | ||
1867 | return 0; | |
1868 | ||
1869 | err_out_free_vnic_resources: | |
1870 | enic_clear_intr_mode(enic); | |
1871 | enic_free_vnic_resources(enic); | |
1872 | ||
1873 | return err; | |
1874 | } | |
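
/* enic_dev_init() and enic_dev_deinit() bracket the soft state built
 * on top of an opened vNIC (NAPI context, vNIC resources, interrupt
 * mode); enic_probe() and enic_remove() below rely on this pairing.
 */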
1875 | ||
27e6c7d3 SF |
1876 | static void enic_iounmap(struct enic *enic) |
1877 | { | |
1878 | unsigned int i; | |
1879 | ||
1880 | for (i = 0; i < ARRAY_SIZE(enic->bar); i++) | |
1881 | if (enic->bar[i].vaddr) | |
1882 | iounmap(enic->bar[i].vaddr); | |
1883 | } | |
1884 | ||
01f2e4ea SF |
1885 | static int __devinit enic_probe(struct pci_dev *pdev, |
1886 | const struct pci_device_id *ent) | |
1887 | { | |
1888 | struct net_device *netdev; | |
1889 | struct enic *enic; | |
1890 | int using_dac = 0; | |
1891 | unsigned int i; | |
1892 | int err; | |
1893 | ||
01f2e4ea SF |
1894 | /* Allocate net device structure and initialize. Private |
1895 | * instance data is initialized to zero. | |
1896 | */ | |
1897 | ||
1898 | netdev = alloc_etherdev(sizeof(struct enic)); | |
1899 | if (!netdev) { | |
1900 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | |
1901 | return -ENOMEM; | |
1902 | } | |
1903 | ||
01f2e4ea SF |
1904 | pci_set_drvdata(pdev, netdev); |
1905 | ||
1906 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
1907 | ||
1908 | enic = netdev_priv(netdev); | |
1909 | enic->netdev = netdev; | |
1910 | enic->pdev = pdev; | |
1911 | ||
1912 | /* Setup PCI resources | |
1913 | */ | |
1914 | ||
1915 | err = pci_enable_device(pdev); | |
1916 | if (err) { | |
1917 | printk(KERN_ERR PFX | |
4b75a442 | 1918 | "Cannot enable PCI device, aborting.\n"); |
01f2e4ea SF |
1919 | goto err_out_free_netdev; |
1920 | } | |
1921 | ||
1922 | err = pci_request_regions(pdev, DRV_NAME); | |
1923 | if (err) { | |
1924 | printk(KERN_ERR PFX | |
4b75a442 | 1925 | "Cannot request PCI regions, aborting.\n"); |
01f2e4ea SF |
1926 | goto err_out_disable_device; |
1927 | } | |
1928 | ||
1929 | pci_set_master(pdev); | |
1930 | ||
1931 | /* Query PCI controller on system for DMA addressing | |
1932 | * limitation for the device. Try 40-bit first, and | |
1933 | * fall back to 32-bit.
1934 | */ | |
1935 | ||
50cf156a | 1936 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); |
01f2e4ea | 1937 | if (err) { |
284901a9 | 1938 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
01f2e4ea SF |
1939 | if (err) { |
1940 | printk(KERN_ERR PFX | |
4b75a442 | 1941 | "No usable DMA configuration, aborting.\n"); |
01f2e4ea SF |
1942 | goto err_out_release_regions; |
1943 | } | |
284901a9 | 1944 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
01f2e4ea SF |
1945 | if (err) { |
1946 | printk(KERN_ERR PFX | |
4b75a442 SF |
1947 | "Unable to obtain 32-bit DMA " |
1948 | "for consistent allocations, aborting.\n"); | |
01f2e4ea SF |
1949 | goto err_out_release_regions; |
1950 | } | |
1951 | } else { | |
50cf156a | 1952 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); |
01f2e4ea SF |
1953 | if (err) { |
1954 | printk(KERN_ERR PFX | |
4b75a442 SF |
1955 | "Unable to obtain 40-bit DMA " |
1956 | "for consistent allocations, aborting.\n"); | |
01f2e4ea SF |
1957 | goto err_out_release_regions; |
1958 | } | |
1959 | using_dac = 1; | |
1960 | } | |
1961 | ||
27e6c7d3 | 1962 | /* Map vNIC resources from BAR0-5 |
01f2e4ea SF |
1963 | */ |
1964 | ||
27e6c7d3 SF |
1965 | for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { |
1966 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) | |
1967 | continue; | |
1968 | enic->bar[i].len = pci_resource_len(pdev, i); | |
1969 | enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); | |
1970 | if (!enic->bar[i].vaddr) { | |
1971 | printk(KERN_ERR PFX | |
1972 | "Cannot memory-map BAR %d, aborting.\n", i); | |
1973 | err = -ENODEV; | |
1974 | goto err_out_iounmap; | |
1975 | } | |
1976 | enic->bar[i].bus_addr = pci_resource_start(pdev, i); | |
01f2e4ea SF |
1977 | } |
1978 | ||
1979 | /* Register vNIC device | |
1980 | */ | |
1981 | ||
27e6c7d3 SF |
1982 | enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, |
1983 | ARRAY_SIZE(enic->bar)); | |
01f2e4ea SF |
1984 | if (!enic->vdev) { |
1985 | printk(KERN_ERR PFX | |
4b75a442 | 1986 | "vNIC registration failed, aborting.\n"); |
01f2e4ea SF |
1987 | err = -ENODEV; |
1988 | goto err_out_iounmap; | |
1989 | } | |
1990 | ||
1991 | /* Issue device open to get the device into a known state
1992 | */ | |
1993 | ||
1994 | err = enic_dev_open(enic); | |
1995 | if (err) { | |
1996 | printk(KERN_ERR PFX | |
4b75a442 | 1997 | "vNIC dev open failed, aborting.\n"); |
01f2e4ea SF |
1998 | goto err_out_vnic_unregister; |
1999 | } | |
2000 | ||
2001 | /* Issue device init to initialize the vnic-to-switch link. | |
2002 | * We'll start with carrier off and wait for link UP | |
2003 | * notification later to turn on carrier. We don't need | |
2004 | * to wait here for the vnic-to-switch link initialization | |
2005 | * to complete; link UP notification is the indication that | |
2006 | * the process is complete. | |
2007 | */ | |
2008 | ||
2009 | netif_carrier_off(netdev); | |
2010 | ||
2011 | err = vnic_dev_init(enic->vdev, 0); | |
2012 | if (err) { | |
2013 | printk(KERN_ERR PFX | |
4b75a442 | 2014 | "vNIC dev init failed, aborting.\n"); |
01f2e4ea SF |
2015 | goto err_out_dev_close; |
2016 | } | |
2017 | ||
6fdfa970 | 2018 | err = enic_dev_init(enic); |
01f2e4ea SF |
2019 | if (err) { |
2020 | printk(KERN_ERR PFX | |
6fdfa970 | 2021 | "Device initialization failed, aborting.\n"); |
01f2e4ea SF |
2022 | goto err_out_dev_close; |
2023 | } | |
2024 | ||
01f2e4ea SF |
2025 | /* Set up the notification timer, HW reset task, and locks
2026 | */ | |
2027 | ||
2028 | init_timer(&enic->notify_timer); | |
2029 | enic->notify_timer.function = enic_notify_timer; | |
2030 | enic->notify_timer.data = (unsigned long)enic; | |
2031 | ||
2032 | INIT_WORK(&enic->reset, enic_reset); | |
2033 | ||
2034 | for (i = 0; i < enic->wq_count; i++) | |
2035 | spin_lock_init(&enic->wq_lock[i]); | |
2036 | ||
2037 | spin_lock_init(&enic->devcmd_lock); | |
2038 | ||
2039 | /* Register net device | |
2040 | */ | |
2041 | ||
2042 | enic->port_mtu = enic->config.mtu; | |
2043 | (void)enic_change_mtu(netdev, enic->port_mtu); | |
2044 | ||
2045 | err = enic_set_mac_addr(netdev, enic->mac_addr); | |
2046 | if (err) { | |
2047 | printk(KERN_ERR PFX | |
4b75a442 | 2048 | "Invalid MAC address, aborting.\n"); |
6fdfa970 | 2049 | goto err_out_dev_deinit; |
01f2e4ea SF |
2050 | } |
2051 | ||
7c844599 SF |
2052 | enic->tx_coalesce_usecs = enic->config.intr_timer_usec; |
2053 | enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; | |
2054 | ||
afe29f7a | 2055 | netdev->netdev_ops = &enic_netdev_ops; |
01f2e4ea SF |
2056 | netdev->watchdog_timeo = 2 * HZ; |
2057 | netdev->ethtool_ops = &enic_ethtool_ops; | |
01f2e4ea | 2058 | |
9f63a7c6 SF |
2059 | netdev->features |= NETIF_F_HW_VLAN_TX | |
2060 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; | |
01f2e4ea SF |
2061 | if (ENIC_SETTING(enic, TXCSUM)) |
2062 | netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | |
2063 | if (ENIC_SETTING(enic, TSO)) | |
2064 | netdev->features |= NETIF_F_TSO | | |
2065 | NETIF_F_TSO6 | NETIF_F_TSO_ECN; | |
86ca9db7 SF |
2066 | if (ENIC_SETTING(enic, LRO)) |
2067 | netdev->features |= NETIF_F_LRO; | |
01f2e4ea SF |
2068 | if (using_dac) |
2069 | netdev->features |= NETIF_F_HIGHDMA; | |
2070 | ||
01f2e4ea SF |
2071 | enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM); |
2072 | ||
86ca9db7 SF |
2073 | enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR; |
2074 | enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC; | |
2075 | enic->lro_mgr.lro_arr = enic->lro_desc; | |
2076 | enic->lro_mgr.get_skb_header = enic_get_skb_header; | |
2077 | enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; | |
2078 | enic->lro_mgr.dev = netdev; | |
2079 | enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE; | |
2080 | enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; | |
2081 | ||
01f2e4ea SF |
2082 | err = register_netdev(netdev); |
2083 | if (err) { | |
2084 | printk(KERN_ERR PFX | |
4b75a442 | 2085 | "Cannot register net device, aborting.\n"); |
6fdfa970 | 2086 | goto err_out_dev_deinit; |
01f2e4ea SF |
2087 | } |
2088 | ||
2089 | return 0; | |
2090 | ||
6fdfa970 SF |
2091 | err_out_dev_deinit: |
2092 | enic_dev_deinit(enic); | |
01f2e4ea SF |
2093 | err_out_dev_close: |
2094 | vnic_dev_close(enic->vdev); | |
2095 | err_out_vnic_unregister: | |
01f2e4ea SF |
2096 | vnic_dev_unregister(enic->vdev); |
2097 | err_out_iounmap: | |
2098 | enic_iounmap(enic); | |
2099 | err_out_release_regions: | |
2100 | pci_release_regions(pdev); | |
2101 | err_out_disable_device: | |
2102 | pci_disable_device(pdev); | |
2103 | err_out_free_netdev: | |
2104 | pci_set_drvdata(pdev, NULL); | |
2105 | free_netdev(netdev); | |
2106 | ||
2107 | return err; | |
2108 | } | |
2109 | ||
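/* Teardown is the exact reverse of the setup order in enic_probe(),
 * matching the error-unwind labels at the end of probe.
 */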
2110 | static void __devexit enic_remove(struct pci_dev *pdev) | |
2111 | { | |
2112 | struct net_device *netdev = pci_get_drvdata(pdev); | |
2113 | ||
2114 | if (netdev) { | |
2115 | struct enic *enic = netdev_priv(netdev); | |
2116 | ||
2117 | flush_scheduled_work(); | |
2118 | unregister_netdev(netdev); | |
6fdfa970 | 2119 | enic_dev_deinit(enic); |
01f2e4ea | 2120 | vnic_dev_close(enic->vdev); |
01f2e4ea SF |
2121 | vnic_dev_unregister(enic->vdev); |
2122 | enic_iounmap(enic); | |
2123 | pci_release_regions(pdev); | |
2124 | pci_disable_device(pdev); | |
2125 | pci_set_drvdata(pdev, NULL); | |
2126 | free_netdev(netdev); | |
2127 | } | |
2128 | } | |
2129 | ||
2130 | static struct pci_driver enic_driver = { | |
2131 | .name = DRV_NAME, | |
2132 | .id_table = enic_id_table, | |
2133 | .probe = enic_probe, | |
2134 | .remove = __devexit_p(enic_remove), | |
2135 | }; | |
2136 | ||
2137 | static int __init enic_init_module(void) | |
2138 | { | |
2139 | printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); | |
2140 | ||
2141 | return pci_register_driver(&enic_driver); | |
2142 | } | |
2143 | ||
2144 | static void __exit enic_cleanup_module(void) | |
2145 | { | |
2146 | pci_unregister_driver(&enic_driver); | |
2147 | } | |
2148 | ||
2149 | module_init(enic_init_module); | |
2150 | module_exit(enic_cleanup_module); |