/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgb.h"
char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");
/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
        {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
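/* MODULE_DEVICE_TABLE exports the IDs above as module aliases so that
 * udev/modprobe can load ixgb automatically when a matching PCI device
 * is discovered. */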
/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
                                   struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_rx_register(struct net_device *netdev,
                                  struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers ixgb_err_handler = {
        .error_detected = ixgb_io_error_detected,
        .slot_reset = ixgb_io_slot_reset,
        .resume = ixgb_io_resume,
};
static struct pci_driver ixgb_driver = {
        .name     = ixgb_driver_name,
        .id_table = ixgb_pci_tbl,
        .probe    = ixgb_probe,
        .remove   = __devexit_p(ixgb_remove),
        .err_handler = &ixgb_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
        printk(KERN_INFO "%s - version %s\n",
               ixgb_driver_string, ixgb_driver_version);

        printk(KERN_INFO "%s\n", ixgb_copyright);

        return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);
/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
        pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);
/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
        IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
        IXGB_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
}
/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
        u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
                  IXGB_INT_TXDW | IXGB_INT_LSC;
        if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
                val |= IXGB_INT_GPI0;
        IXGB_WRITE_REG(&adapter->hw, IMS, val);
        IXGB_WRITE_FLUSH(&adapter->hw);
}
int
ixgb_up(struct ixgb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err, irq_flags = IRQF_SHARED;
        int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        struct ixgb_hw *hw = &adapter->hw;

        /* hardware has been reset, we need to reload some things */

        ixgb_rar_set(hw, netdev->dev_addr, 0);
        ixgb_set_multi(netdev);

        ixgb_restore_vlan(adapter);

        ixgb_configure_tx(adapter);
        ixgb_setup_rctl(adapter);
        ixgb_configure_rx(adapter);
        ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

        /* disable interrupts and get the hardware into a known state */
        IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

        /* only enable MSI if bus is in PCI-X mode */
        if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
                err = pci_enable_msi(adapter->pdev);
                if (!err) {
                        adapter->have_msi = 1;
                        irq_flags = 0;
                }
                /* proceed to try to request regular interrupt */
        }

        err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
                          netdev->name, netdev);
        if (err) {
                if (adapter->have_msi)
                        pci_disable_msi(adapter->pdev);
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate interrupt Error: %d\n", err);
                return err;
        }

        if ((hw->max_frame_size != max_frame) ||
            (hw->max_frame_size !=
             (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

                hw->max_frame_size = max_frame;

                IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
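                /* beyond programming MFS, frames larger than a standard
                 * Ethernet frame also require the jumbo frame enable (JFE)
                 * bit in CTRL0, handled below */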
                if (hw->max_frame_size >
                    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                        u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

                        if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                                ctrl0 |= IXGB_CTRL0_JFE;
                                IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                        }
                }
        }

        clear_bit(__IXGB_DOWN, &adapter->flags);

        napi_enable(&adapter->napi);
        ixgb_irq_enable(adapter);

        netif_wake_queue(netdev);

        mod_timer(&adapter->watchdog_timer, jiffies);

        return 0;
}
void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
        struct net_device *netdev = adapter->netdev;

        /* prevent the interrupt handler from restarting watchdog */
        set_bit(__IXGB_DOWN, &adapter->flags);

        napi_disable(&adapter->napi);
        /* waiting for NAPI to complete can re-enable interrupts */
        ixgb_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);

        if (adapter->have_msi)
                pci_disable_msi(adapter->pdev);

        if (kill_watchdog)
                del_timer_sync(&adapter->watchdog_timer);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        ixgb_reset(adapter);
        ixgb_clean_tx_ring(adapter);
        ixgb_clean_rx_ring(adapter);
}
void
ixgb_reset(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;

        ixgb_adapter_stop(hw);
        if (!ixgb_init_hw(hw))
                netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

        /* restore frame size information */
        IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
        if (hw->max_frame_size >
            IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
                u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
                if (!(ctrl0 & IXGB_CTRL0_JFE)) {
                        ctrl0 |= IXGB_CTRL0_JFE;
                        IXGB_WRITE_REG(hw, CTRL0, ctrl0);
                }
        }
}
static const struct net_device_ops ixgb_netdev_ops = {
        .ndo_open               = ixgb_open,
        .ndo_stop               = ixgb_close,
        .ndo_start_xmit         = ixgb_xmit_frame,
        .ndo_get_stats          = ixgb_get_stats,
        .ndo_set_multicast_list = ixgb_set_multi,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgb_set_mac,
        .ndo_change_mtu         = ixgb_change_mtu,
        .ndo_tx_timeout         = ixgb_tx_timeout,
        .ndo_vlan_rx_register   = ixgb_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = ixgb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgb_netpoll,
#endif
};
/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev = NULL;
        struct ixgb_adapter *adapter;
        static int cards_found = 0;
        int pci_using_dac;
        int i;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
            !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
                pci_using_dac = 1;
        } else {
                if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
                    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
                        printk(KERN_ERR
                               "ixgb: No usable DMA configuration, aborting\n");
                        goto err_dma_mask;
                }
                pci_using_dac = 0;
        }

        err = pci_request_regions(pdev, ixgb_driver_name);
        if (err)
                goto err_request_regions;

        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);

        adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
        if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        for (i = BAR_1; i <= BAR_5; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        adapter->hw.io_base = pci_resource_start(pdev, i);
                        break;
                }
        }

        netdev->netdev_ops = &ixgb_netdev_ops;
        ixgb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        /* setup the private structure */

        err = ixgb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        netdev->features = NETIF_F_SG |
                           NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;
        netdev->features |= NETIF_F_TSO;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        /* make sure the EEPROM is good */

        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
                netif_err(adapter, probe, adapter->netdev,
                          "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
        memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &ixgb_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;

        INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

        netif_info(adapter, probe, adapter->netdev,
                   "Intel(R) PRO/10GbE Network Connection\n");
        ixgb_check_options(adapter);
        /* reset the hardware with the new settings */

        ixgb_reset(adapter);

        cards_found++;
        return 0;

err_register:
err_sw_init:
err_eeprom:
        iounmap(adapter->hw.hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
        pci_disable_device(pdev);
        return err;
}
/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        flush_scheduled_work();

        unregister_netdev(netdev);

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

        free_netdev(netdev);
}
/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
        struct ixgb_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_id = pdev->subsystem_device;

        hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

        if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
            (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
                hw->mac_type = ixgb_82597;
        else {
                /* should never have loaded on this device */
                netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
        }

        /* enable flow control to be programmed */
        hw->fc.send_xon = 1;

        set_bit(__IXGB_DOWN, &adapter->flags);
        return 0;
}
/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        int err;

        /* allocate transmit descriptors */
        err = ixgb_setup_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        netif_carrier_off(netdev);

        /* allocate receive descriptors */

        err = ixgb_setup_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        err = ixgb_up(adapter);
        if (err)
                goto err_up;

        netif_start_queue(netdev);

        return 0;

err_up:
        ixgb_free_rx_resources(adapter);
err_setup_rx:
        ixgb_free_tx_resources(adapter);
err_setup_tx:
        ixgb_reset(adapter);

        return err;
}
/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        ixgb_down(adapter, true);

        ixgb_free_tx_resources(adapter);
        ixgb_free_rx_resources(adapter);

        return 0;
}
/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * txdr->count;
        txdr->buffer_info = vmalloc(size);
        if (!txdr->buffer_info) {
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate transmit descriptor ring memory\n");
                return -ENOMEM;
        }
        memset(txdr->buffer_info, 0, size);

        /* round up to nearest 4K */

        txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
        txdr->size = ALIGN(txdr->size, 4096);

        txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
        if (!txdr->desc) {
                vfree(txdr->buffer_info);
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate transmit descriptor memory\n");
                return -ENOMEM;
        }
        memset(txdr->desc, 0, txdr->size);

        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;

        return 0;
}
/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
        u64 tdba = adapter->tx_ring.dma;
        u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
        u32 tctl;
        struct ixgb_hw *hw = &adapter->hw;

        /* Setup the Base and Length of the Tx Descriptor Ring
         * tx_ring.dma can be either a 32 or 64 bit value
         */

        IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

        IXGB_WRITE_REG(hw, TDLEN, tdlen);

        /* Setup the HW Tx Head and Tail descriptor pointers */

        IXGB_WRITE_REG(hw, TDH, 0);
        IXGB_WRITE_REG(hw, TDT, 0);

        /* don't set up txdctl, it induces performance problems if configured
         * incorrectly */

        /* Set the Tx Interrupt Delay register */

        IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

        /* Program the Transmit Control Register */

        tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
        IXGB_WRITE_REG(hw, TCTL, tctl);

        /* Setup Transmit Descriptor Settings for this adapter */
        adapter->tx_cmd_type =
                IXGB_TX_DESC_TYPE |
                (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
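/* Note: the TIDV delay programmed above is assumed to apply only to
 * descriptors that carry IXGB_TX_DESC_CMD_IDE in their command field,
 * which is why tx_cmd_type includes the IDE bit only when
 * tx_int_delay_enable is set. */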
/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgb_buffer) * rxdr->count;
        rxdr->buffer_info = vmalloc(size);
        if (!rxdr->buffer_info) {
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate receive descriptor ring\n");
                return -ENOMEM;
        }
        memset(rxdr->buffer_info, 0, size);

        /* Round up to nearest 4K */

        rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
        rxdr->size = ALIGN(rxdr->size, 4096);

        rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
        if (!rxdr->desc) {
                vfree(rxdr->buffer_info);
                netif_err(adapter, probe, adapter->netdev,
                          "Unable to allocate receive descriptors\n");
                return -ENOMEM;
        }
        memset(rxdr->desc, 0, rxdr->size);

        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;

        return 0;
}
/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
        u32 rctl;

        rctl = IXGB_READ_REG(&adapter->hw, RCTL);

        rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

        rctl |=
                IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
                IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
                (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

        rctl |= IXGB_RCTL_SECRC;

        if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
                rctl |= IXGB_RCTL_BSIZE_2048;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
                rctl |= IXGB_RCTL_BSIZE_4096;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
                rctl |= IXGB_RCTL_BSIZE_8192;
        else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
                rctl |= IXGB_RCTL_BSIZE_16384;
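        /* the hardware only understands the four buffer sizes above, so the
         * actual rx_buffer_len is rounded up to the next size the NIC can be
         * told about; any slack at the end of the buffer goes unused */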
        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
        u64 rdba = adapter->rx_ring.dma;
        u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
        struct ixgb_hw *hw = &adapter->hw;
        u32 rctl;
        u32 rxcsum;

        /* make sure receives are disabled while setting up the descriptors */

        rctl = IXGB_READ_REG(hw, RCTL);
        IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

        /* set the Receive Delay Timer Register */

        IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

        /* Setup the Base and Length of the Rx Descriptor Ring */

        IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

        IXGB_WRITE_REG(hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGB_WRITE_REG(hw, RDH, 0);
        IXGB_WRITE_REG(hw, RDT, 0);

        /* due to the hardware errata with RXDCTL, we are unable to use any of
         * the performance enhancing features of it without causing other
         * subtle bugs, some of the bugs could include receive length
         * corruption at high data rates (WTHRESH > 0) and/or receive
         * descriptor ring irregularites (particularly in hardware cache) */
        IXGB_WRITE_REG(hw, RXDCTL, 0);

        /* Enable Receive Checksum Offload for TCP and UDP */
        if (adapter->rx_csum) {
                rxcsum = IXGB_READ_REG(hw, RXCSUM);
                rxcsum |= IXGB_RXCSUM_TUOFL;
                IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
        }

        /* Enable Receives */

        IXGB_WRITE_REG(hw, RCTL, rctl);
}
/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_tx_ring(adapter);

        vfree(adapter->tx_ring.buffer_info);
        adapter->tx_ring.buffer_info = NULL;

        pci_free_consistent(pdev, adapter->tx_ring.size,
                            adapter->tx_ring.desc, adapter->tx_ring.dma);

        adapter->tx_ring.desc = NULL;
}
static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                struct ixgb_buffer *buffer_info)
{
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        pci_unmap_page(adapter->pdev, buffer_info->dma,
                                       buffer_info->length, PCI_DMA_TODEVICE);
                else
                        pci_unmap_single(adapter->pdev, buffer_info->dma,
                                         buffer_info->length, PCI_DMA_TODEVICE);
                buffer_info->dma = 0;
        }

        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
        /* these fields must always be initialized in tx
         * buffer_info->length = 0;
         * buffer_info->next_to_watch = 0; */
}
/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */

        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        size = sizeof(struct ixgb_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        IXGB_WRITE_REG(&adapter->hw, TDH, 0);
        IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}
/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct pci_dev *pdev = adapter->pdev;

        ixgb_clean_rx_ring(adapter);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;

        pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

        rx_ring->desc = NULL;
}
/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct ixgb_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */

        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        pci_unmap_single(pdev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
                        buffer_info->length = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct ixgb_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        IXGB_WRITE_REG(&adapter->hw, RDH, 0);
        IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}
/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

        return 0;
}
/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_hw *hw = &adapter->hw;
        struct dev_mc_list *mc_ptr;
        u32 rctl;
        int i;

        /* Check for Promiscuous and All Multicast modes */

        rctl = IXGB_READ_REG(hw, RCTL);

        if (netdev->flags & IFF_PROMISC) {
                rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
                rctl &= ~IXGB_RCTL_VFE;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        rctl |= IXGB_RCTL_MPE;
                        rctl &= ~IXGB_RCTL_UPE;
                } else {
                        rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
                }
                rctl |= IXGB_RCTL_VFE;
        }

        if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
                rctl |= IXGB_RCTL_MPE;
                IXGB_WRITE_REG(hw, RCTL, rctl);
        } else {
                u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
                       IXGB_ETH_LENGTH_OF_ADDRESS];
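                /* mta holds the addresses as one flat array of consecutive
                 * 6-byte (IXGB_ETH_LENGTH_OF_ADDRESS) entries, the layout
                 * ixgb_mc_addr_list_update() walks when it programs the
                 * hardware multicast table */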
                IXGB_WRITE_REG(hw, RCTL, rctl);

                i = 0;
                netdev_for_each_mc_addr(mc_ptr, netdev)
                        memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
                               mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);

                ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
        }
}
/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
        struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
        struct net_device *netdev = adapter->netdev;
        struct ixgb_desc_ring *txdr = &adapter->tx_ring;

        ixgb_check_for_link(&adapter->hw);

        if (ixgb_check_for_bad_link(&adapter->hw)) {
                /* force the reset path */
                netif_stop_queue(netdev);
        }

        if (adapter->hw.link_up) {
                if (!netif_carrier_ok(netdev)) {
                        printk(KERN_INFO "ixgb: %s NIC Link is Up 10 Gbps "
                               "Full Duplex, Flow Control: %s\n",
                               netdev->name,
                               (adapter->hw.fc.type == ixgb_fc_full) ?
                                "RX/TX" :
                               ((adapter->hw.fc.type == ixgb_fc_rx_pause) ?
                                "RX" :
                               ((adapter->hw.fc.type == ixgb_fc_tx_pause) ?
                                "TX" : "None")));
                        adapter->link_speed = 10000;
                        adapter->link_duplex = FULL_DUPLEX;
                        netif_carrier_on(netdev);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
                               netdev->name);
                        netif_carrier_off(netdev);
                }
        }

        ixgb_update_stats(adapter);

        if (!netif_carrier_ok(netdev)) {
                if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
                        schedule_work(&adapter->tx_timeout_task);
                        /* return immediately since reset is imminent */
                        return;
                }
        }

        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = true;

        /* generate an interrupt to force clean up of any stragglers */
        IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
#define IXGB_TX_FLAGS_CSUM              0x00000001
#define IXGB_TX_FLAGS_VLAN              0x00000002
#define IXGB_TX_FLAGS_TSO               0x00000004
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
        u16 ipcse, tucse, mss;
        int err;

        if (likely(skb_is_gso(skb))) {
                struct ixgb_buffer *buffer_info;
                struct iphdr *iph;

                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (err)
                                return err;
                }

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                mss = skb_shinfo(skb)->gso_size;
                iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
                ipcss = skb_network_offset(skb);
                ipcso = (void *)&(iph->check) - (void *)skb->data;
                ipcse = skb_transport_offset(skb) - 1;
                tucss = skb_transport_offset(skb);
                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->ipcss = ipcss;
                context_desc->ipcso = ipcso;
                context_desc->ipcse = cpu_to_le16(ipcse);
                context_desc->tucss = tucss;
                context_desc->tucso = tucso;
                context_desc->tucse = cpu_to_le16(tucse);
                context_desc->mss = cpu_to_le16(mss);
                context_desc->hdr_len = hdr_len;
                context_desc->status = 0;
                context_desc->cmd_type_len = cpu_to_le32(
                                                  IXGB_CONTEXT_DESC_TYPE
                                                | IXGB_CONTEXT_DESC_CMD_TSE
                                                | IXGB_CONTEXT_DESC_CMD_IP
                                                | IXGB_CONTEXT_DESC_CMD_TCP
                                                | IXGB_CONTEXT_DESC_CMD_IDE
                                                | (skb->len - (hdr_len)));

                if (++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return 1;
        }

        return 0;
}
static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        u8 css, cso;

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                struct ixgb_buffer *buffer_info;
                css = skb_transport_offset(skb);
                cso = css + skb->csum_offset;

                i = adapter->tx_ring.next_to_use;
                context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
                buffer_info = &adapter->tx_ring.buffer_info[i];
                WARN_ON(buffer_info->dma != 0);

                context_desc->tucss = css;
                context_desc->tucso = cso;
                context_desc->tucse = 0;
                /* zero out any previously existing data in one instruction */
                *(u32 *)&(context_desc->ipcss) = 0;
                context_desc->status = 0;
                context_desc->hdr_len = 0;
                context_desc->mss = 0;
                context_desc->cmd_type_len =
                        cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
                                    | IXGB_TX_DESC_CMD_IDE);

                if (++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return true;
        }

        return false;
}
#define IXGB_MAX_TXD_PWR        14
#define IXGB_MAX_DATA_PER_TXD   (1<<IXGB_MAX_TXD_PWR)
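/* With IXGB_MAX_TXD_PWR = 14 a single Tx descriptor can carry
 * 1 << 14 = 16384 bytes, so larger buffers are split across multiple
 * descriptors by ixgb_tx_map() below. */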
static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
            unsigned int first)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_buffer *buffer_info;
        int len = skb_headlen(skb);
        unsigned int offset = 0, size, count = 0, i;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;

        i = tx_ring->next_to_use;

        while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
                /* Workaround for premature desc write-backs
                 * in TSO mode.  Append 4-byte sentinel desc */
                if (unlikely(mss && !nr_frags && size == len && size > 8))
                        size -= 4;

                buffer_info->length = size;
                WARN_ON(buffer_info->dma != 0);
                buffer_info->time_stamp = jiffies;
                buffer_info->mapped_as_page = false;
                buffer_info->dma = pci_map_single(pdev, skb->data + offset,
                                                  size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, buffer_info->dma))
                        goto dma_error;
                buffer_info->next_to_watch = 0;

                len -= size;
                offset += size;
                count++;
                if (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }
        }

        for (f = 0; f < nr_frags; f++) {
                struct skb_frag_struct *frag;

                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;
                offset = frag->page_offset;

                while (len) {
                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, IXGB_MAX_DATA_PER_TXD);

                        /* Workaround for premature desc write-backs
                         * in TSO mode.  Append 4-byte sentinel desc */
                        if (unlikely(mss && (f == (nr_frags - 1))
                                     && size == len && size > 8))
                                size -= 4;

                        buffer_info->length = size;
                        buffer_info->time_stamp = jiffies;
                        buffer_info->mapped_as_page = true;
                        buffer_info->dma =
                                pci_map_page(pdev, frag->page,
                                             offset, size,
                                             PCI_DMA_TODEVICE);
                        if (pci_dma_mapping_error(pdev, buffer_info->dma))
                                goto dma_error;
                        buffer_info->next_to_watch = 0;

                        len -= size;
                        offset += size;
                        count++;
                }
        }
        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;

        return count;

dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");
        buffer_info->dma = 0;
        if (count)
                count--;

        while (count--) {
                if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }

        return 0;
}
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct ixgb_tx_desc *tx_desc = NULL;
        struct ixgb_buffer *buffer_info;
        u32 cmd_type_len = adapter->tx_cmd_type;
        u8 status = 0;
        u8 popts = 0;
        unsigned int i;

        if (tx_flags & IXGB_TX_FLAGS_TSO) {
                cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
                popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
        }

        if (tx_flags & IXGB_TX_FLAGS_CSUM)
                popts |= IXGB_TX_DESC_POPTS_TXSM;

        if (tx_flags & IXGB_TX_FLAGS_VLAN)
                cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

        i = tx_ring->next_to_use;

        while (count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = IXGB_TX_DESC(*tx_ring, i);
                tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->cmd_type_len =
                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->status = status;
                tx_desc->popts = popts;
                tx_desc->vlan = cpu_to_le16(vlan_id);

                if (++i == tx_ring->count) i = 0;
        }

        tx_desc->cmd_type_len |=
                cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        IXGB_WRITE_REG(&adapter->hw, TDT, i);
}
static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

        netif_stop_queue(netdev);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
        smp_mb();

        /* We need to check again in a case another CPU has just
         * made room available. */
        if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! */
        netif_start_queue(netdev);
        ++adapter->restart_queue;
        return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
                              struct ixgb_desc_ring *tx_ring, int size)
{
        if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __ixgb_maybe_stop_tx(netdev, size);
}
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
                          (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
        + 1 /* one more needed for sentinel TSO workaround */
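/* Worked example: TXD_USE_COUNT(S) is ceil(S / 16384), so
 * TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) = 1 and, on 4K pages,
 * TXD_USE_COUNT(PAGE_SIZE) = 1; DESC_NEEDED then comes to
 * 1 + MAX_SKB_FRAGS + 1 (context) + 1 (sentinel), i.e. 21 with the
 * usual MAX_SKB_FRAGS of 18. */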
static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        unsigned int first;
        unsigned int tx_flags = 0;
        int vlan_id = 0;
        int count = 0;
        int tso;

        if (test_bit(__IXGB_DOWN, &adapter->flags)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (skb->len <= 0) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
                                        DESC_NEEDED)))
                return NETDEV_TX_BUSY;

        if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
                tx_flags |= IXGB_TX_FLAGS_VLAN;
                vlan_id = vlan_tx_tag_get(skb);
        }

        first = adapter->tx_ring.next_to_use;

        tso = ixgb_tso(adapter, skb);
        if (tso < 0) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (likely(tso))
                tx_flags |= IXGB_TX_FLAGS_TSO;
        else if (ixgb_tx_csum(adapter, skb))
                tx_flags |= IXGB_TX_FLAGS_CSUM;

        count = ixgb_tx_map(adapter, skb, first);

        if (count) {
                ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
                /* Make sure there is space in the ring for the next send. */
                ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
        } else {
                dev_kfree_skb_any(skb);
                adapter->tx_ring.buffer_info[first].time_stamp = 0;
                adapter->tx_ring.next_to_use = first;
        }

        return NETDEV_TX_OK;
}
/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
ixgb_tx_timeout(struct net_device *netdev)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->tx_timeout_task);
}
static void
ixgb_tx_timeout_task(struct work_struct *work)
{
        struct ixgb_adapter *adapter =
                container_of(work, struct ixgb_adapter, tx_timeout_task);

        adapter->tx_timeout_count++;
        ixgb_down(adapter, true);
        ixgb_up(adapter);
}
/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
        return &netdev->stats;
}
/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
        int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

        /* MTU < 68 is an error for IPv4 traffic, just don't allow it */
        if ((new_mtu < 68) ||
            (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
                netif_err(adapter, probe, adapter->netdev,
                          "Invalid MTU setting %d\n", new_mtu);
                return -EINVAL;
        }

        if (old_max_frame == max_frame)
                return 0;

        if (netif_running(netdev))
                ixgb_down(adapter, true);

        adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */

        netdev->mtu = new_mtu;

        if (netif_running(netdev))
                ixgb_up(adapter);

        return 0;
}
/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/

void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* Prevent stats update while adapter is being reset */
        if (pci_channel_offline(pdev))
                return;

        if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
            (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
                u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
                u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
                u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
                u64 bcast = ((u64)bcast_h << 32) | bcast_l;

                multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
                /* fix up multicast stats by removing broadcasts */
                if (multi >= bcast)
                        multi -= bcast;

                adapter->stats.mprcl += (multi & 0xFFFFFFFF);
                adapter->stats.mprch += (multi >> 32);
                adapter->stats.bprcl += bcast_l;
                adapter->stats.bprch += bcast_h;
        } else {
                adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
                adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
                adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
                adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
        }
        adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
        adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
        adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
        adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
        adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
        adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
        adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
        adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
        adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
        adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
        adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
        adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
        adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
        adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
        adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
        adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
        adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
        adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
        adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
        adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
        adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
        adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
        adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
        adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
        adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
        adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
        adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
        adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
        adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
        adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
        adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
        adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
        adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
        adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
        adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
        adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
        adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
        adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
        adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
        adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
        adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
        adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
        adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
        adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
        adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
        adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
        adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
        adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
        adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
        adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
        adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
        adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
        adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
        adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
        adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
        adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

        /* Fill out the OS statistics structure */

        netdev->stats.rx_packets = adapter->stats.gprcl;
        netdev->stats.tx_packets = adapter->stats.gptcl;
        netdev->stats.rx_bytes = adapter->stats.gorcl;
        netdev->stats.tx_bytes = adapter->stats.gotcl;
        netdev->stats.multicast = adapter->stats.mprcl;
        netdev->stats.collisions = 0;

        /* ignore RLEC as it reports errors for padded (<64bytes) frames
         * with a length in the type/len field */
        netdev->stats.rx_errors =
            /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
            adapter->stats.ruc +
            adapter->stats.roc /*+ adapter->stats.rlec */ +
            adapter->stats.icbc +
            adapter->stats.ecbc + adapter->stats.mpc;

        /* see above
         * netdev->stats.rx_length_errors = adapter->stats.rlec;
         */

        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
        netdev->stats.rx_fifo_errors = adapter->stats.mpc;
        netdev->stats.rx_missed_errors = adapter->stats.mpc;
        netdev->stats.rx_over_errors = adapter->stats.mpc;

        netdev->stats.tx_errors = 0;
        netdev->stats.rx_frame_errors = 0;
        netdev->stats.tx_aborted_errors = 0;
        netdev->stats.tx_carrier_errors = 0;
        netdev->stats.tx_fifo_errors = 0;
        netdev->stats.tx_heartbeat_errors = 0;
        netdev->stats.tx_window_errors = 0;
}
#define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t
ixgb_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        struct ixgb_hw *hw = &adapter->hw;
        u32 icr = IXGB_READ_REG(hw, ICR);

        if (unlikely(!icr))
                return IRQ_NONE;  /* Not our interrupt */

        if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
                if (!test_bit(__IXGB_DOWN, &adapter->flags))
                        mod_timer(&adapter->watchdog_timer, jiffies);

        if (napi_schedule_prep(&adapter->napi)) {

                /* Disable interrupts and register for poll. The flush
                   of the posted write is intentionally left out. */

                IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
                __napi_schedule(&adapter->napi);
        }

        return IRQ_HANDLED;
}
/**
 * ixgb_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/

static int
ixgb_clean(struct napi_struct *napi, int budget)
{
        struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
        int work_done = 0;

        ixgb_clean_tx_irq(adapter);
        ixgb_clean_rx_irq(adapter, &work_done, budget);

        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
                if (!test_bit(__IXGB_DOWN, &adapter->flags))
                        ixgb_irq_enable(adapter);
        }

        return work_done;
}
/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
        struct net_device *netdev = adapter->netdev;
        struct ixgb_tx_desc *tx_desc, *eop_desc;
        struct ixgb_buffer *buffer_info;
        unsigned int i, eop;
        bool cleaned = false;

        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = IXGB_TX_DESC(*tx_ring, eop);

        while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

                for (cleaned = false; !cleaned; ) {
                        tx_desc = IXGB_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];

                        if (tx_desc->popts &
                            (IXGB_TX_DESC_POPTS_TXSM |
                             IXGB_TX_DESC_POPTS_IXSM))
                                adapter->hw_csum_tx_good++;

                        ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

                        *(u32 *)&(tx_desc->status) = 0;

                        cleaned = (i == eop);
                        if (++i == tx_ring->count) i = 0;
                }

                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = IXGB_TX_DESC(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

        if (unlikely(cleaned && netif_carrier_ok(netdev) &&
                     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean. */
                smp_mb();

                if (netif_queue_stopped(netdev) &&
                    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
                        netif_wake_queue(netdev);
                        ++adapter->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                /* detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i */
                adapter->detect_tx_hung = false;
                if (tx_ring->buffer_info[eop].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
                    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
                         IXGB_STATUS_TXOFF)) {
                        /* detected Tx unit hang */
                        netif_err(adapter, drv, adapter->netdev,
                                  "Detected Tx Unit Hang\n"
                                  "  TDH                  <%x>\n"
                                  "  TDT                  <%x>\n"
                                  "  next_to_use          <%x>\n"
                                  "  next_to_clean        <%x>\n"
                                  "buffer_info[next_to_clean]\n"
                                  "  time_stamp           <%lx>\n"
                                  "  next_to_watch        <%x>\n"
                                  "  jiffies              <%lx>\n"
                                  "  next_to_watch.status <%x>\n",
                                  IXGB_READ_REG(&adapter->hw, TDH),
                                  IXGB_READ_REG(&adapter->hw, TDT),
                                  tx_ring->next_to_use,
                                  tx_ring->next_to_clean,
                                  tx_ring->buffer_info[eop].time_stamp,
                                  eop,
                                  jiffies,
                                  eop_desc->status);
                        netif_stop_queue(netdev);
                }
        }

        return cleaned;
}
/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
 **/

static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
                 struct ixgb_rx_desc *rx_desc,
                 struct sk_buff *skb)
{
        /* Ignore Checksum bit is set OR
         * TCP Checksum has not been calculated
         */
        if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
            (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }

        /* At this point we know the hardware did the TCP checksum */
        /* now look at the TCP checksum error bit */
        if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
                /* let the stack verify checksum errors */
                skb->ip_summed = CHECKSUM_NONE;
                adapter->hw_csum_rx_error++;
        } else {
                /* TCP checksum is good */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                adapter->hw_csum_rx_good++;
        }
}
/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 **/

static bool
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_rx_desc *rx_desc, *next_rxd;
        struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
        u32 length;
        unsigned int i, j;
        int cleaned_count = 0;
        bool cleaned = false;

        i = rx_ring->next_to_clean;
        rx_desc = IXGB_RX_DESC(*rx_ring, i);
        buffer_info = &rx_ring->buffer_info[i];

        while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
                struct sk_buff *skb;
                u8 status;

                if (*work_done >= work_to_do)
                        break;

                (*work_done)++;

                status = rx_desc->status;
                skb = buffer_info->skb;
                buffer_info->skb = NULL;

                prefetch(skb->data - NET_IP_ALIGN);

                if (++i == rx_ring->count) i = 0;
                next_rxd = IXGB_RX_DESC(*rx_ring, i);
                prefetch(next_rxd);

                if ((j = i + 1) == rx_ring->count) j = 0;
                next2_buffer = &rx_ring->buffer_info[j];
                prefetch(next2_buffer);
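                /* the descriptor one entry ahead and the buffer_info two
                 * entries ahead were just prefetched so they are cache-warm
                 * by the time the loop advances to them */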
                next_buffer = &rx_ring->buffer_info[i];

                cleaned = true;
                cleaned_count++;

                pci_unmap_single(pdev,
                                 buffer_info->dma,
                                 buffer_info->length,
                                 PCI_DMA_FROMDEVICE);
                buffer_info->dma = 0;

                length = le16_to_cpu(rx_desc->length);
                rx_desc->length = 0;

                if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

                        /* All receives must fit into a single buffer */

                        IXGB_DBG("Receive packet consumed multiple buffers "
                                 "length<%x>\n", length);

                        dev_kfree_skb_irq(skb);
                        goto rxdesc_done;
                }

                if (unlikely(rx_desc->errors &
                             (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
                              IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
                        dev_kfree_skb_irq(skb);
                        goto rxdesc_done;
                }

                /* code added for copybreak, this should improve
                 * performance for small packets with large amounts
                 * of reassembly being done in the stack */
                if (length < copybreak) {
                        struct sk_buff *new_skb =
                                netdev_alloc_skb_ip_align(netdev, length);
                        if (new_skb) {
                                skb_copy_to_linear_data_offset(new_skb,
                                                               -NET_IP_ALIGN,
                                                               (skb->data -
                                                                NET_IP_ALIGN),
                                                               (length +
                                                                NET_IP_ALIGN));
                                /* save the skb in buffer_info as good */
                                buffer_info->skb = skb;
                                skb = new_skb;
                        }
                }
                /* end copybreak code */

                /* Good Receive */
                skb_put(skb, length);

                /* Receive Checksum Offload */
                ixgb_rx_checksum(adapter, rx_desc, skb);

                skb->protocol = eth_type_trans(skb, netdev);
                if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
                                                 le16_to_cpu(rx_desc->special));
                } else {
                        netif_receive_skb(skb);
                }

rxdesc_done:
                /* clean up descriptor, might be written over by hw */
                rx_desc->status = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
                        ixgb_alloc_rx_buffers(adapter, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                buffer_info = next_buffer;
        }

        rx_ring->next_to_clean = i;

        cleaned_count = IXGB_DESC_UNUSED(rx_ring);
        if (cleaned_count)
                ixgb_alloc_rx_buffers(adapter, cleaned_count);

        return cleaned;
}
/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
{
        struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct ixgb_rx_desc *rx_desc;
        struct ixgb_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        long cleancount;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
        cleancount = IXGB_DESC_UNUSED(rx_ring);

        /* leave three descriptors unused */
        while (--cleancount > 2 && cleaned_count--) {
                /* recycle! its good for you */
                skb = buffer_info->skb;
                if (skb) {
                        skb_trim(skb, 0);
                        goto map_skb;
                }

                skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }

                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
map_skb:
                buffer_info->dma = pci_map_single(pdev,
                                                  skb->data,
                                                  adapter->rx_buffer_len,
                                                  PCI_DMA_FROMDEVICE);

                rx_desc = IXGB_RX_DESC(*rx_ring, i);
                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                /* guarantee DD bit not set now before h/w gets descriptor
                 * this is the rest of the workaround for h/w double
                 * writeback. */
                rx_desc->status = 0;

                if (++i == rx_ring->count) i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

        if (likely(rx_ring->next_to_use != i)) {
                rx_ring->next_to_use = i;
                if (unlikely(i-- == 0))
                        i = (rx_ring->count - 1);
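                /* back the tail up one entry so RDT names the last descriptor
                 * actually initialized (assumed hardware rule that the tail
                 * may not point past descriptors not yet written) */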
                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs, such
                 * as IA-64). */
                wmb();
                IXGB_WRITE_REG(&adapter->hw, RDT, i);
        }
}
/**
 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 *
 * @param netdev network interface device structure
 * @param grp indicates to enable or disable tagging/stripping
 **/

static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        u32 ctrl, rctl;

        ixgb_irq_disable(adapter);
        adapter->vlgrp = grp;

        if (grp) {
                /* enable VLAN tag insert/strip */
                ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
                ctrl |= IXGB_CTRL0_VME;
                IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

                /* enable VLAN receive filtering */

                rctl = IXGB_READ_REG(&adapter->hw, RCTL);
                rctl &= ~IXGB_RCTL_CFIEN;
                IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
        } else {
                /* disable VLAN tag insert/strip */

                ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
                ctrl &= ~IXGB_CTRL0_VME;
                IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
        }

        /* don't enable interrupts unless we are UP */
        if (adapter->netdev->flags & IFF_UP)
                ixgb_irq_enable(adapter);
}
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        u32 vfta, index;

        /* add VID to filter table */
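        /* the VLAN filter table (VFTA) is a 4096-bit bitmap kept in 128
         * 32-bit registers: vid >> 5 selects the word and vid & 0x1F the
         * bit within it, e.g. vid 100 -> word 3, bit 4 */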
        index = (vid >> 5) & 0x7F;
        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
        vfta |= (1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
}
static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct ixgb_adapter *adapter = netdev_priv(netdev);
        u32 vfta, index;

        ixgb_irq_disable(adapter);

        vlan_group_set_device(adapter->vlgrp, vid, NULL);

        /* don't enable interrupts unless we are UP */
        if (adapter->netdev->flags & IFF_UP)
                ixgb_irq_enable(adapter);

        /* remove VID from filter table */

        index = (vid >> 5) & 0x7F;
        vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
        vfta &= ~(1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
}
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
        ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

        if (adapter->vlgrp) {
                u16 vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
                        if (!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        ixgb_vlan_rx_add_vid(adapter->netdev, vid);
                }
        }
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void ixgb_netpoll(struct net_device *dev)
{
        struct ixgb_adapter *adapter = netdev_priv(dev);

        disable_irq(adapter->pdev->irq);
        ixgb_intr(adapter->pdev->irq, dev);
        enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgb_io_error_detected - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (netif_running(netdev))
                ixgb_down(adapter, true);

        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        if (pci_enable_device(pdev)) {
                netif_err(adapter, probe, adapter->netdev,
                          "Cannot re-enable PCI device after reset\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        /* Perform card reset only on one instance of the card */
        if (0 != PCI_FUNC(pdev->devfn))
                return PCI_ERS_RESULT_RECOVERED;

        pci_set_master(pdev);

        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
        ixgb_reset(adapter);

        /* Make sure the EEPROM is good */
        if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
                netif_err(adapter, probe, adapter->netdev,
                          "After reset, the EEPROM checksum is not valid\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
        memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                netif_err(adapter, probe, adapter->netdev,
                          "After reset, invalid MAC address\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgb_io_resume - called when it's OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it's OK to resume
 * normal operation. Implementation resembles the second-half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgb_adapter *adapter = netdev_priv(netdev);

        pci_set_master(pdev);

        if (netif_running(netdev)) {
                if (ixgb_up(adapter)) {
                        printk("ixgb: can't bring device back up after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);
        mod_timer(&adapter->watchdog_timer, jiffies);
}