/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.30-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
                           u8 msix_vector)
{
	u32 ivar, index;

	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	index = (int_alloc_entry >> 2) & 0x1F;
	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}
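
/*
 * Added commentary (worked example, not from the original source): each
 * 32-bit IVAR register packs four 8-bit interrupt-allocation entries.
 * For int_alloc_entry = 5 the code above computes
 * index = (5 >> 2) & 0x1F = 1 and byte lane 5 & 0x3 = 1, so the
 * msix_vector (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 15:8 of
 * IVAR(1); the read-modify-write preserves the other three lanes.
 */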
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
		               tx_buffer_info->length, PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	/* tx_buffer_info must be completely set up in the transmit path */
}
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 head, tail;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	head = IXGBE_READ_REG(hw, tx_ring->head);
	tail = IXGBE_READ_REG(hw, tx_ring->tail);
	adapter->detect_tx_hung = false;
	if ((head != tail) &&
	    tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			head, tail,
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
	(((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

#define GET_TX_HEAD_FROM_RING(ring) (\
	*(volatile u32 *) \
	((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
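
/*
 * Added commentary (worked example): with IXGBE_MAX_TXD_PWR = 14 a single
 * descriptor carries at most IXGBE_MAX_DATA_PER_TXD = 16384 bytes, so
 * TXD_USE_COUNT(60000) = (60000 >> 14) + 1 = 3 + 1 = 4 descriptors.
 * DESC_NEEDED is therefore the worst case for one skb: descriptors for
 * skb->data, TXD_USE_COUNT(PAGE_SIZE) per possible fragment, plus one
 * context descriptor.
 */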
static void ixgbe_tx_timeout(struct net_device *netdev);
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;
	unsigned int i;
	u32 head, oldhead;
	unsigned int count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	rmb();
	head = GET_TX_HEAD_FROM_RING(tx_ring);
	head = le32_to_cpu(head);
	i = tx_ring->next_to_clean;
	while (1) {
		while (i != head) {
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			skb = tx_buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			i++;
			if (i == tx_ring->count)
				i = 0;

			count++;
			if (count == tx_ring->count)
				goto done_cleaning;
		}
		oldhead = head;
		rmb();
		head = GET_TX_HEAD_FROM_RING(tx_ring);
		head = le32_to_cpu(head);
		if (head == oldhead)
			goto done_cleaning;
	} /* while (1) */

done_cleaning:
	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if ((total_packets >= tx_ring->work_limit) ||
	    (count == tx_ring->count))
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (total_packets ? true : false);
}
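
/*
 * Added commentary (design note): this cleanup path relies on the head
 * write-back feature enabled in ixgbe_configure_tx() -- hardware DMAs the
 * current head index into the word just past the descriptor ring (see
 * GET_TX_HEAD_FROM_RING), so buffers can be reclaimed without an MMIO
 * read of TDH on every poll.  Re-reading the head after draining to
 * oldhead catches descriptors that completed while we were cleaning.
 */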
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (adapter->netdev->features & NETIF_F_LRO &&
	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (adapter->vlgrp && is_vlan && (tag != 0))
			lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
			                             adapter->vlgrp, tag,
			                             rx_desc);
		else
			lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
		ring->lro_used = true;
	} else {
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
			if (adapter->vlgrp && is_vlan && (tag != 0))
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
			else
				netif_receive_skb(skb);
		} else {
			if (adapter->vlgrp && is_vlan && (tag != 0))
				vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
			else
				netif_rx(skb);
		}
	}
}
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb;
			skb = netdev_alloc_skb(adapter->netdev,
			                       (rx_ring->rx_buf_len +
			                        NET_IP_ALIGN));

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data,
			                         rx_ring->rx_buf_len,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
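
/*
 * Added commentary (design note): in packet-split mode each page is
 * handed to hardware a half page at a time; page_offset is XOR-toggled
 * between 0 and PAGE_SIZE/2, so a page no one else holds a reference to
 * can be re-used for the next buffer without a fresh alloc_page().  The
 * tail value written above is one behind next_to_use, since hardware
 * owns descriptors strictly up to (but not including) the tail index.
 */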
static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len + NET_IP_ALIGN,
			                 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);

		cleaned_count++;
		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
		ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	if (rx_ring->lro_used) {
		lro_flush_all(&rx_ring->lro_mgr);
		rx_ring->lro_used = false;
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}
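
/*
 * Added commentary (design note): a packet spanning several descriptors
 * only has EOP set on its last one.  Until then the in-progress skb is
 * threaded through the rx_buffer_info of the following descriptor
 * (counted in non_eop_descs) so each pass of the loop can keep appending
 * page fragments; only the EOP descriptor's skb is checksummed and
 * handed to ixgbe_receive_skb().
 */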
static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
		                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
	}

	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
= 255
789 * ixgbe_update_itr - update the dynamic ITR value based on statistics
790 * @adapter: pointer to adapter
791 * @eitr: eitr setting (ints per sec) to give last timeslice
792 * @itr_setting: current throttle rate in ints/second
793 * @packets: the number of packets during this measurement interval
794 * @bytes: the number of bytes during this measurement interval
796 * Stores a new ITR value based on packets and byte
797 * counts during the last interrupt. The advantage of per interrupt
798 * computation is faster updates and more accurate ITR for the current
799 * traffic pattern. Constants in this function were computed
800 * based on theoretical maximum wire speed and thresholds were set based
801 * on testing data as well as attempting to minimize response time
802 * while increasing bulk throughput.
803 * this functionality is controlled by the InterruptThrottleRate module
804 * parameter (see ixgbe_param.c)
806 static u8
ixgbe_update_itr(struct ixgbe_adapter
*adapter
,
807 u32 eitr
, u8 itr_setting
,
808 int packets
, int bytes
)
810 unsigned int retval
= itr_setting
;
815 goto update_itr_done
;
818 /* simple throttlerate management
819 * 0-20MB/s lowest (100000 ints/s)
820 * 20-100MB/s low (20000 ints/s)
821 * 100-1249MB/s bulk (8000 ints/s)
823 /* what was last interrupt timeslice? */
824 timepassed_us
= 1000000/eitr
;
825 bytes_perint
= bytes
/ timepassed_us
; /* bytes/usec */
827 switch (itr_setting
) {
829 if (bytes_perint
> adapter
->eitr_low
)
830 retval
= low_latency
;
833 if (bytes_perint
> adapter
->eitr_high
)
834 retval
= bulk_latency
;
835 else if (bytes_perint
<= adapter
->eitr_low
)
836 retval
= lowest_latency
;
839 if (bytes_perint
<= adapter
->eitr_high
)
840 retval
= low_latency
;
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
	                       sizeof(struct ixgbe_q_vector);
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
		        itr_reg);
		IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
	}

	return;
}
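
/*
 * Added commentary (worked example): the exponential smoothing above
 * walks the programmed rate toward the new target in 90/10 steps.
 * Moving from 8000 toward 100000 ints/s, the first update programs
 * (8000 * 90)/100 + (100000 * 10)/100 = 7200 + 10000 = 17200 ints/s,
 * avoiding an abrupt step change in interrupt rate.
 */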
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}
static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	ixgbe_check_fan_failure(adapter, eicr);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}
static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, tx_ring);
#endif
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbe_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	return IRQ_HANDLED;
}
/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
	netif_rx_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	ixgbe_msix_clean_rx(irq, data);
	ixgbe_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}
/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector.
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(napi);
		if (adapter->itr_setting & 3)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
	}

	return work_done;
}
/**
 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u16 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, rx_ring);
#endif
		ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(napi);
		if (adapter->itr_setting & 3)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
	}

	return work_done;
}
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
	a->q_vector[v_idx].rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int t_idx)
{
	a->q_vector[v_idx].adapter = a;
	set_bit(t_idx, a->q_vector[v_idx].txr_idx);
	a->q_vector[v_idx].txr_count++;
	a->tx_ring[t_idx].v_idx = 1 << v_idx;
}
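
/*
 * Added commentary: v_idx is stored in each ring as a one-hot mask
 * (1 << v_idx) rather than an index, so it can be written directly to
 * EICS/EIMS/EIMC, which take a bitmask of MSI-X causes -- e.g. a ring
 * owned by vector 3 carries v_idx = 0x8.
 */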
/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
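
/*
 * Added commentary (worked example): with 16 Rx rings, 16 Tx rings and
 * only 4 vectors, the first pass computes rqpv = DIV_ROUND_UP(16, 4) = 4,
 * so each vector receives four Rx rings (likewise for Tx).  With 5
 * vectors the first gets DIV_ROUND_UP(16, 5) = 4 and, since
 * rxr_remaining shrinks each pass, the remaining four vectors get
 * three rings apiece.
 */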
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(&adapter->q_vector[vector]);

		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "rx", ri++);
		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "tx", ti++);
		} else {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "TxRx", vector);
		}

		err = request_irq(adapter->msix_entries[vector].vector,
		                  handler, 0, adapter->name[vector],
		                  &(adapter->q_vector[vector]));
		if (err) {
			DPRINTK(PROBE, ERR,
			        "request_irq failed for MSIX interrupt "
			        "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
	                  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
		        "request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
		         &(adapter->q_vector[i]));
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector;
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
	                                    q_vector->tx_itr,
	                                    tx_ring->total_packets,
	                                    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
	                                    q_vector->rx_itr,
	                                    rx_ring->total_packets,
	                                    rx_ring->total_bytes);

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		q_vector->eitr = new_itr;
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		/* must write high and low 16 bits to reset counter */
		IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
	}

	return;
}
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;
	mask = IXGBE_EIMS_ENABLE_MASK;
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM */
		ixgbe_irq_enable(adapter);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	ixgbe_check_fan_failure(adapter, eicr);

	if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__netif_rx_schedule(&adapter->q_vector[0].napi);
	}

	return IRQ_HANDLED;
}
static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
		                  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
		                  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
			         &(adapter->q_vector[i]));
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
	                EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));

	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba, tdwba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		                (tdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		tdwba = ring->dma +
		        (ring->count * sizeof(union ixgbe_adv_tx_desc));
		tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
{
	struct ixgbe_ring *rx_ring;
	u32 srrctl;
	int queue0 = 0;
	unsigned long mask;

	/* program one srrctl register per VMDq index */
	if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
		long shift, len;
		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
		len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
		shift = find_first_bit(&mask, len);
		queue0 = index & mask;
		index = (index & mask) >> shift;
	/* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
	} else {
		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
		queue0 = index & mask;
		index = index & mask;
	}

	rx_ring = &adapter->rx_ring[queue0];

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBE_RX_HDR_SIZE <<
		            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		           IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBE_RXBUFFER_2048 >>
			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}
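
/*
 * Added commentary (assumes the usual 82598 SRRCTL encoding): BSIZEPKT
 * is programmed in 1 KB granularity, hence the
 * ">> IXGBE_SRRCTL_BSIZEPKT_SHIFT" (a 2048-byte buffer is written as 2),
 * while the header-split shift places the 256-byte IXGBE_RX_HDR_SIZE in
 * the BSIZEHDR field, which the hardware interprets in 64-byte units.
 */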
/**
 * ixgbe_get_skb_hdr - helper function for LRO header processing
 * @skb: pointer to sk_buff to be added to LRO packet
 * @iphdr: pointer to ip header structure
 * @tcph: pointer to tcp header structure
 * @hdr_flags: pointer to header flags
 * @priv: private data
 **/
static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
                             u64 *hdr_flags, void *priv)
{
	union ixgbe_adv_rx_desc *rx_desc = priv;

	/* Verify that this is a valid IPv4 TCP packet */
	if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
	     (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
		return -1;

	/* Set network headers */
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
	(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen, rxctrl, rxcsum;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
	                  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
	                  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 fctrl, hlreg0;
	u32 reta = 0, mrqc;
	u32 rdrxctl;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_RDH(j);
		adapter->rx_ring[i].tail = IXGBE_RDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
		/* Initial LRO Settings */
		adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
		adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
		adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
		adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
			adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
		adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
		adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
		adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

		ixgbe_configure_srrctl(adapter, j);
	}

	/*
	 * For VMDq support of different descriptor types or
	 * buffer sizes through the use of multiple SRRCTL
	 * registers, RDRXCTL.MVMEN must be set to 1
	 *
	 * also, the manual doesn't mention it clearly but DCA hints
	 * will only use queue 0's tags unless this bit is set.  Side
	 * effects of setting this bit are only that SRRCTL must be
	 * fully programmed [0..15]
	 */
	if (adapter->flags &
	    (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		/* Fill out redirection table */
		for (i = 0, j = 0; i < 128; i++, j++) {
			if (j == adapter->ring_feature[RING_F_RSS].indices)
				j = 0;
			/* reta = 4-byte sliding window of
			 * 0x00..(indices-1)(indices-1)00..etc. */
			reta = (reta << 8) | (j * 0x11);
			if ((i & 3) == 3)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		}

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

		mrqc = IXGBE_MRQC_RSSEN
		    /* Perform hash on these packet types */
		       | IXGBE_MRQC_RSS_FIELD_IPV4
		       | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX
		       | IXGBE_MRQC_RSS_FIELD_IPV6
		       | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
		       | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in descriptor, enables
		 * RSS hash */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}
	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
		/* Enable IPv4 payload checksum for UDP fragments
		 * if PCSD is not set */
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
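
/*
 * Added commentary (worked example): the RETA fill above shifts a
 * 4-byte window and writes every fourth iteration.  With 4 RSS queues
 * the (j * 0x11) terms produce the byte sequence 00 11 22 33 00 11 ...,
 * so each 32-bit RETA register holds four successive queue indices (the
 * 0x11 multiplier duplicates the index into both nibbles of the byte).
 */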
static void ixgbe_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	/*
	 * For a DCB driver, always enable VLAN tag stripping so we can
	 * still receive traffic from a DCB-enabled host even if we're
	 * not in DCB mode.
	 */
	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
	ctrl |= IXGBE_VLNCTRL_VME;
	ctrl &= ~IXGBE_VLNCTRL_CFIEN;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
}
static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
}
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
static void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vlnctrl;
	u8 *addr_list = NULL;
	int addr_count = 0;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = 1;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			fctrl &= ~IXGBE_FCTRL_UPE;
		} else {
			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		}
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		hw->addr_ctrl.user_set_promisc = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* reprogram secondary unicast list */
	addr_count = netdev->uc_count;
	if (addr_count)
		addr_list = netdev->uc_list->dmi_addr;
	hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
	                                ixgbe_addr_list_itr);

	/* reprogram multicast list */
	addr_count = netdev->mc_count;
	if (addr_count)
		addr_list = netdev->mc_list->dmi_addr;
	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
	                                ixgbe_addr_list_itr);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
		    (q_vector->rxr_count > 1))
			napi->poll = &ixgbe_clean_rxonly_many;

		napi_enable(napi);
	}
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 **/
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl, vlnctrl;
	int i, j;

	ixgbe_dcb_check_config(&adapter->dcb_cfg);
	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);

	/* reconfigure the hardware */
	ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		/* PThresh workaround for Tx hang with DFP enabled. */
		txdctl |= 32;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
	}
	/* Enable VLAN tag insert/strip */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
}

#endif
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbe_set_rx_mode(netdev);

	ixgbe_restore_vlan(adapter);
#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		netif_set_gso_max_size(netdev, 32768);
		ixgbe_configure_dcb(adapter);
	} else {
		netif_set_gso_max_size(netdev, 65536);
	}
#else
	netif_set_gso_max_size(netdev, 65536);
#endif

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
		                       (adapter->rx_ring[i].count - 1));
}

static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 txdctl, rxdctl, mhadd;
	u32 gpie;

	ixgbe_get_hw_control(adapter);

	if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
	    (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
			        IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
		} else {
			/* MSI only */
			gpie = 0;
		}
		/* XXX: to interrupt immediately for EICS writes, enable this */
		/* gpie |= IXGBE_GPIE_EIMEN; */
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* Enable fan failure interrupt if media type is copper */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
		gpie |= IXGBE_SDP1_GPIEN;
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	}

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
		/* enable PTHRESH=32 descriptors (half the internal cache)
		 * and HTHRESH=0 descriptors (to minimize latency on fetch),
		 * this also removes a pesky rx_no_buffer_count increment */
		rxdctl |= 0x0020;
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
	}
	/* enable all receives */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	ixgbe_napi_add_all(adapter);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	ixgbe_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	ixgbe_irq_enable(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}
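
/*
 * Worked example for the MHADD update above: with the default MTU of
 * 1500, max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518;
 * that value lands in the MFS field so the MAC accepts frames up to
 * the full wire size, not just the IP payload.
 */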

void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);
	ixgbe_down(adapter);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}
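
/*
 * __IXGBE_RESETTING doubles as a lock: every path that bounces the
 * interface (ethtool tests, MTU changes, the Tx hang reset task)
 * funnels through ixgbe_reinit_locked(), so the test_and_set_bit()
 * loop above serializes concurrent down/up sequences.
 */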

int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}

void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	if (hw->mac.ops.init_hw(hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			dev_kfree_skb(rx_buffer_info->skb);
			rx_buffer_info->skb = NULL;
		}
		if (!rx_buffer_info->page)
			continue;
		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
		               PCI_DMA_FROMDEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
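
/*
 * The three-way teardown above mirrors how receive buffers are built:
 * an skb for the header portion (rx_buffer_info->dma) and, when packet
 * split is in use, half a page (PAGE_SIZE / 2) mapped separately for
 * the payload, hence the independent page_dma unmap.
 */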

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	IXGBE_WRITE_FLUSH(hw);
	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbe_irq_disable(adapter);

	ixgbe_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	cancel_work_sync(&adapter->watchdog_task);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
		                (txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&adapter->pdev->dev);
	}
#endif
	if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);
	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);

#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
	if (dca_add_requester(&adapter->pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
		ixgbe_setup_dca(adapter);
	}
#endif
}

/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector = container_of(napi,
	                                  struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int tx_cleaned, work_done = 0;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
	}
#endif

	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
	ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);

	if (tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(napi);
		if (adapter->itr_setting & 3)
			ixgbe_set_itr(adapter);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter);
	}
	return work_done;
}
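
/*
 * NAPI contract, as relied on above: returning less than the budget
 * signals that polling is finished, and netif_rx_complete() plus the
 * interrupt re-enable must happen before that return; returning the
 * full budget keeps the device on the poll list with interrupts masked.
 */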

/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}

static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	int nrq = 1, ntq = 1;
	int feature_mask = 0, rss_i, rss_m;
	int dcb_i, dcb_m;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		dcb_i = adapter->ring_feature[RING_F_DCB].indices;
		dcb_m = 0;
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		rss_m = 0;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
		feature_mask |= IXGBE_FLAG_DCB_ENABLED;

		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
			dcb_m = 0x7 << 3;
			rss_i = min(8, rss_i);
			rss_m = 0x7;
			nrq = dcb_i * rss_i;
			ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
			break;
		case (IXGBE_FLAG_DCB_ENABLED):
			dcb_m = 0x7 << 3;
			nrq = dcb_i;
			ntq = dcb_i;
			break;
		case (IXGBE_FLAG_RSS_ENABLED):
			rss_m = 0xF;
			nrq = rss_i;
			ntq = rss_i;
			break;
		case 0:
		default:
			dcb_i = 0;
			dcb_m = 0;
			rss_i = 0;
			rss_m = 0;
			nrq = 1;
			ntq = 1;
			break;
		}

		/* Sanity check, we should never have zero queues */
		nrq = nrq ? : 1;
		ntq = ntq ? : 1;

		adapter->ring_feature[RING_F_DCB].indices = dcb_i;
		adapter->ring_feature[RING_F_DCB].mask = dcb_m;
		adapter->ring_feature[RING_F_RSS].indices = rss_i;
		adapter->ring_feature[RING_F_RSS].mask = rss_m;
		break;
	default:
		nrq = 1;
		ntq = 1;
		break;
	}

	adapter->num_rx_queues = nrq;
	adapter->num_tx_queues = ntq;
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                                       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 * 4) TCP Timer (optional)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		                      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		ixgbe_set_num_queues(adapter);
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		adapter->num_msix_vectors = vectors;
	}
}
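
/*
 * The retry loop depends on the old pci_enable_msix() return
 * convention: 0 means every requested vector was granted, a negative
 * value is a hard failure, and a positive value reports how many
 * vectors could have been granted, which is fed straight back in.
 */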

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	int feature_mask = 0, rss_i;
	int i, txr_idx, rxr_idx;
	int dcb_i;

	/* Number of supported queues */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		dcb_i = adapter->ring_feature[RING_F_DCB].indices;
		rss_i = adapter->ring_feature[RING_F_RSS].indices;
		txr_idx = 0;
		rxr_idx = 0;
		feature_mask |= IXGBE_FLAG_DCB_ENABLED;
		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
		switch (adapter->flags & feature_mask) {
		case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
			for (i = 0; i < dcb_i; i++) {
				int j;
				/* Rx first */
				for (j = 0; j < adapter->num_rx_queues; j++) {
					adapter->rx_ring[rxr_idx].reg_idx =
						i << 3 | j;
					rxr_idx++;
				}
				/* Tx now */
				for (j = 0; j < adapter->num_tx_queues; j++) {
					adapter->tx_ring[txr_idx].reg_idx =
						i << 2 | (j >> 1);
					txr_idx++;
				}
			}
			break;
		case (IXGBE_FLAG_DCB_ENABLED):
			/* the number of queues is assumed to be symmetric */
			for (i = 0; i < dcb_i; i++) {
				adapter->rx_ring[i].reg_idx = i << 3;
				adapter->tx_ring[i].reg_idx = i << 2;
			}
			break;
		case (IXGBE_FLAG_RSS_ENABLED):
			for (i = 0; i < adapter->num_rx_queues; i++)
				adapter->rx_ring[i].reg_idx = i;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_ring[i].reg_idx = i;
			break;
		case 0:
		default:
			break;
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
	}

	ixgbe_cache_ring_register(adapter);

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
	               (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
	 * we can easily reach upwards of 64 Rx descriptor queues and
	 * 32 Tx queues.  Thus, we cap it off in those rare cases where
	 * the cpu count also exceeds our vector limit.
	 */
	v_budget = min(v_budget, MAX_MSIX_COUNT);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
	                                sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		ixgbe_set_num_queues(adapter);
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		err = ixgbe_alloc_queues(adapter);
		if (err) {
			DPRINTK(PROBE, ERR, "Unable to allocate memory "
			        "for queues\n");
			goto out;
		}

		goto try_msi;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbe_acquire_msix_vectors(adapter, v_budget);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		goto out;

try_msi:
	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
		        "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
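
	return err;
}

/*
 * Rough v_budget example: on a 4-CPU box with 8 Rx and 8 Tx queues,
 * min(8 + 8, 4 * 2) + NON_Q_VECTORS caps the request at 8 queue
 * vectors plus the non-queue ones, so the vector count tracks CPUs
 * rather than the (potentially much larger) queue count.
 */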

void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
	return;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
	        "Tx Queue count = %u\n",
	        (adapter->num_rx_queues > 1) ? "Enabled" :
	        "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_set_interrupt:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_alloc_queues:
	return err;
}

/**
 * ixgbe_sfp_timer - worker thread to find a missing module
 * @data: pointer to our adapter struct
 **/
static void ixgbe_sfp_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/* Do the sfp_timer outside of interrupt context due to the
	 * delays that sfp+ detection requires
	 */
	schedule_work(&adapter->sfp_task);
}

/**
 * ixgbe_sfp_task - worker thread to find a missing module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto reschedule;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			DPRINTK(PROBE, ERR, "failed to initialize because an "
			        "unsupported SFP+ module type was detected.\n"
			        "Reload the driver after installing a "
			        "supported module.\n");
			unregister_netdev(adapter->netdev);
		} else {
			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
			        hw->phy.sfp_type);
		}
		/* don't need this routine any more */
		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	}
	return;
reschedule:
	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
	                   adapter->ring_feature[RING_F_DCB].indices);

#endif
	if (hw->mac.ops.get_media_type &&
	    (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
		adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;

	/* default flow control settings */
	hw->fc.original_type = ixgbe_fc_none;
	hw->fc.type = ixgbe_fc_none;
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;

	/* select 10G link by default */
	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;

	/* enable itr by default in dynamic mode */
	adapter->itr_setting = 1;
	adapter->eitr_param = 20000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
	                sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
	        "descriptor ring\n");
	return -ENOMEM;
}
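
/*
 * Sizing example: a 512-entry ring at 16 bytes per advanced descriptor
 * is 8192 bytes and already 4K aligned; the ALIGN() above only matters
 * for other counts, since hardware requires a 4K-aligned descriptor
 * base address.
 */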

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
	rx_ring->lro_mgr.lro_arr = vmalloc(size);
	if (!rx_ring->lro_mgr.lro_arr)
		return -ENOMEM;
	memset(rx_ring->lro_mgr.lro_arr, 0, size);

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
		        "vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);

	if (!rx_ring->desc) {
		DPRINTK(PROBE, ERR,
		        "Memory allocation failed for the rx desc ring\n");
		vfree(rx_ring->rx_buffer_info);
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

alloc_failed:
	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
	        netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
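
/*
 * Example: a jumbo MTU of 9000 yields max_frame = 9000 + 14 + 4 = 9018
 * bytes, well inside the adapter's jumbo limit, while anything below
 * the historical 68-byte minimum MTU is rejected with -EINVAL.
 */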

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

/**
 * ixgbe_napi_add_all - prep napi structs for use
 * @adapter: private struct
 *
 * helper function to napi_add each possible q_vector->napi
 */
void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
{
	int q_idx, q_vectors;
	struct net_device *netdev = adapter->netdev;
	int (*poll)(struct napi_struct *, int);

	/* check if we already have our netdev->napi_list populated */
	if (&netdev->napi_list != netdev->napi_list.next)
		return;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		poll = &ixgbe_clean_rxonly;
		/* Only enable as many vectors as we have rx queues. */
		q_vectors = adapter->num_rx_queues;
	} else {
		poll = &ixgbe_poll;
		/* only one q_vector for legacy modes */
		q_vectors = 1;
	}

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
		netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
	}
}

void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		netif_napi_del(&q_vector->napi);
	}
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
		       "device\n");
		return err;
	}

	ixgbe_napi_add_all(adapter);
	ixgbe_reset(adapter);

	if (netif_running(netdev)) {
		err = ixgbe_open(adapter->netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}

#endif /* CONFIG_PM */
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}
	ixgbe_reset_interrupt_capability(adapter);
	ixgbe_napi_del_all(adapter);
	INIT_LIST_HEAD(&netdev->napi_list);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	ixgbe_suspend(pdev, PMSG_SUSPEND);
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
		                                            IXGBE_PXONRXC(i));
		adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
		                                            IXGBE_PXONTXC(i));
		adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
		                                             IXGBE_PXOFFRXC(i));
		adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
		                                             IXGBE_PXOFFTXC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
	                               adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}

/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		/* Cause software interrupt to ensure rx rings are cleaned */
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			u32 eics =
			 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
			IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
		} else {
			/* For legacy and MSI interrupts don't set any bits that
			 * are enabled for EIAM, because this operation would
			 * set *both* EIMS and EICS for any bit in EIAM */
			IXGBE_WRITE_REG(hw, IXGBE_EICS,
			                (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		}
		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + 2 * HZ));
	}

	schedule_work(&adapter->watchdog_task);
}

/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
		                         IXGBE_TRY_LINK_TIMEOUT))) {
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
			        "10 Gbps" :
			        (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
			         "1 Gbps" : "unknown speed")),
			       ((FLOW_RX && FLOW_TX) ? "RX/TX" :
			        (FLOW_RX ? "RX" :
			        (FLOW_TX ? "TX" : "None"))));

			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbe_update_stats(adapter);
	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
			                                         iph->daddr, 0,
			                                         IPPROTO_TCP,
			                                         0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			                     &ipv6_hdr(skb)->daddr,
			                     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
		                   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
			                    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
		                    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
					        "partial checksum but proto=%x!\n",
					        skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
		                                     skb->data + offset,
		                                     size, PCI_DMA_TODEVICE);
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
			                                   frag->page,
			                                   offset,
			                                   size,
			                                   PCI_DMA_TODEVICE);
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}

static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                           struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
			                 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
		        cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
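
/*
 * Only the final descriptor in the chain has txd_cmd (EOP | RS | IFCS)
 * merged in after the loop, so the hardware reports one descriptor
 * writeback per packet instead of one per mapped buffer.
 */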

static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}
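
/*
 * Stop-then-recheck: the subqueue is stopped first, a full barrier is
 * issued, and only then is the free count re-read.  Without the second
 * check, a completion racing in on another CPU could leave the queue
 * stopped with plenty of descriptors free.
 */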

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
                               struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}

static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;
	unsigned int f;

	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= (skb->queue_mapping << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		tx_flags |= (skb->queue_mapping << 13);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}
	/* three things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;
	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
	               ixgbe_tx_map(adapter, tx_ring, skb, first),
	               skb->len, hdr_len);

	netdev->trans_start = jiffies;

	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
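
/*
 * The descriptor budgeting above is deliberately conservative: one
 * slot for an optional context descriptor plus TXD_USE_COUNT() for the
 * linear region and each page fragment, so the ring-space check can be
 * made once, before any DMA mapping has happened.
 */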

/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * ixgbe_link_config - set up initial link with default speed and duplex
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_link_config(struct ixgbe_hw *hw)
{
	u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;

	/* must always autoneg for both 1G and 10G link */
	hw->mac.autoneg = true;

	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (hw->phy.media_type == ixgbe_media_type_copper))
		autoneg = IXGBE_LINK_SPEED_82598_AUTONEG;

	return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_get_stats		= ixgbe_get_stats,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_register	= ixgbe_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
};
3988 * ixgbe_probe - Device Initialization Routine
3989 * @pdev: PCI device information struct
3990 * @ent: entry in ixgbe_pci_tbl
3992 * Returns 0 on success, negative on failure
3994 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
3995 * The OS initialization, configuring of the adapter private structure,
3996 * and a hardware reset occur.
3998 static int __devinit
ixgbe_probe(struct pci_dev
*pdev
,
3999 const struct pci_device_id
*ent
)
4001 struct net_device
*netdev
;
4002 struct ixgbe_adapter
*adapter
= NULL
;
4003 struct ixgbe_hw
*hw
;
4004 const struct ixgbe_info
*ii
= ixgbe_info_tbl
[ent
->driver_data
];
4005 static int cards_found
;
4006 int i
, err
, pci_using_dac
;
4007 u16 link_status
, link_speed
, link_width
;
4010 err
= pci_enable_device(pdev
);
4014 if (!pci_set_dma_mask(pdev
, DMA_64BIT_MASK
) &&
4015 !pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
)) {
4018 err
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
);
4020 err
= pci_set_consistent_dma_mask(pdev
, DMA_32BIT_MASK
);
4022 dev_err(&pdev
->dev
, "No usable DMA "
4023 "configuration, aborting\n");
	err = pci_request_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
		        "0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);
	pci_save_state(pdev);
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
	                      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;
	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
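/*
 * EEC bit 8 is the EEPROM-present flag: when the device's automatic
 * read does not detect a valid EEPROM, the generic bit-banging routine
 * is substituted so words can still be clocked out manually through
 * the EEC register interface.
 */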
	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = &ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
	err = ii->get_invariants(hw);
	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* start a kernel thread to watch for a module to arrive */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		DPRINTK(PROBE, ERR, "failed to load because an "
		        "unsupported SFP+ module type was detected.\n");
		goto err_hw_init;
	} else if (err) {
		goto err_hw_init;
	}
	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}
	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_LRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;
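/*
 * vlan_features describes the subset of offloads that remains valid
 * when a VLAN device is stacked on this interface; flags omitted here
 * (such as the VLAN tag offloads themselves) are not inherited by the
 * stacked device.
 */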
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	/* print bus type/speed/width info */
	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
	         ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
	          (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
	          "Unknown"),
	         ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
	          (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
	          (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
	          (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
	          "Unknown"),
	         netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
	         hw->mac.type, hw->phy.type,
	         (part_num >> 8), (part_num & 0xff));
	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
		         "this card is not sufficient for optimal "
		         "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
		         "PCI-Express slot is required.\n");
	}
	/* reset the hardware with the new settings */
	hw->mac.ops.start_hw(hw);

	/* link_config depends on start_hw being called at least once */
	err = ixgbe_link_config(hw);
	if (err) {
		dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
		goto err_register;
	}
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;
#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
		ixgbe_setup_dca(adapter);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;
err_register:
	ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
	ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
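/*
 * The unwind labels above release resources in strict reverse order
 * of acquisition, so a failure at any step tears down only what has
 * already been set up and then falls through every later label to
 * pci_disable_device().
 */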
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;
	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	flush_scheduled_work();
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}
#endif
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_reset_interrupt_capability(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	DPRINTK(PROBE, INFO, "complete\n");
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
		        "pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
		        "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		ixgbe_reset(adapter);

		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
		        err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};
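/*
 * The PCI AER core invokes these hooks in order: error_detected()
 * quiesces the adapter and requests a slot reset, slot_reset()
 * re-enables and reinitializes the device after the bus reset, and
 * resume() restarts traffic once recovery succeeds.
 */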
static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);
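/*
 * With the format strings above, a successful module load reports two
 * lines of the shape:
 *
 *   ixgbe: <driver description string> - version <DRV_VERSION>
 *   ixgbe: <copyright string>
 *
 * with the exact text taken from ixgbe_driver_string,
 * ixgbe_driver_version and ixgbe_copyright.
 */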
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
	                                 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IXGBE_DCA */
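/*
 * dca_register_notify() in ixgbe_init_module() routes DCA provider
 * add/remove events into this callback, which fans each event out to
 * every bound ixgbe device via driver_for_each_device(); a non-zero
 * result from any device handler is reported back as NOTIFY_BAD.
 */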
module_exit(ixgbe_exit_module);