/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call  = igb_notify_dca,
        .next           = NULL,
        .priority       = 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
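
/*
 * Usage sketch (illustrative): with SR-IOV support compiled in, VFs are
 * requested at module load time, e.g. "modprobe igb max_vfs=7".  The
 * driver caps the value at 7 VFs per physical function; see
 * igb_probe_vfs() below.
 */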

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                                              pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
        struct igb_adapter *adapter =
                container_of(tc, struct igb_adapter, cycles);
        struct e1000_hw *hw = &adapter->hw;
        u64 stamp = 0;
        int shift = 0;

        /*
         * The timestamp latches on lowest register read.  For the 82580
         * the lowest register is SYSTIMR instead of SYSTIML.  However, we
         * never adjusted TIMINCA, so SYSTIMR just reads as all 0s and can
         * be ignored.
         */
        if (hw->mac.type == e1000_82580) {
                stamp = rd32(E1000_SYSTIMR) >> 8;
                shift = IGB_82580_TSYNC_SHIFT;
        }

        stamp |= (u64)rd32(E1000_SYSTIML) << shift;
        stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
        return stamp;
}
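
/*
 * Illustrative layout of the stamp assembled above on the 82580, assuming
 * IGB_82580_TSYNC_SHIFT == 24 (the comment in igb_init_hw_timer() below
 * says the registers are shifted by 24):
 *
 *   bits 56..63: SYSTIMH (low byte)
 *   bits 24..55: SYSTIML
 *   bits  0..23: SYSTIMR >> 8
 *
 * On pre-82580 parts shift stays 0 and the value is plain SYSTIMH:SYSTIML.
 */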

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
        cycle_t hw = adapter->cycles.read(&adapter->cycles);
        struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
        struct timespec sys;
        struct timespec delta;
        getnstimeofday(&sys);

        delta = timespec_sub(nic, sys);

        sprintf(buffer,
                "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
                hw,
                (long)nic.tv_sec, nic.tv_nsec,
                (long)sys.tv_sec, sys.tv_nsec,
                (long)delta.tv_sec, delta.tv_nsec);

        return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
#endif
        ret = pci_register_driver(&igb_driver);
        return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) ((((i) & 0x1) << 3) + ((i) >> 1))
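/*
 * Worked example of the mapping above:
 *   Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8,
 *   Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, ...
 * Consecutive indices alternate between queues 0-7 and 8-15, which is
 * what lets VF n own queues n and n + 8.
 */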
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
        int i = 0, j = 0;
        u32 rbase_offset = adapter->vfs_allocated_count;

        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* The queues are allocated for virtualization such that VF 0
                 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
                 * In order to avoid collision we start at the first free queue
                 * and continue consuming queues in the same sequence
                 */
                if (adapter->vfs_allocated_count) {
                        for (; i < adapter->rss_queues; i++)
                                adapter->rx_ring[i].reg_idx = rbase_offset +
                                                              Q_IDX_82576(i);
                        for (; j < adapter->rss_queues; j++)
                                adapter->tx_ring[j].reg_idx = rbase_offset +
                                                              Q_IDX_82576(j);
                }
        case e1000_82575:
        case e1000_82580:
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = rbase_offset + i;
                for (; j < adapter->num_tx_queues; j++)
                        adapter->tx_ring[j].reg_idx = rbase_offset + j;
                break;
        }
}

static void igb_free_queues(struct igb_adapter *adapter)
{
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        adapter->tx_ring = NULL;
        adapter->rx_ring = NULL;

        adapter->num_rx_queues = 0;
        adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        int i;

        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                goto err;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->rx_ring)
                goto err;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
        }

        igb_cache_ring_register(adapter);

        return 0;

err:
        igb_free_queues(adapter);

        return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
        u32 msixbm = 0;
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
        int rx_queue = IGB_N0_QUEUE;
        int tx_queue = IGB_N0_QUEUE;

        if (q_vector->rx_ring)
                rx_queue = q_vector->rx_ring->reg_idx;
        if (q_vector->tx_ring)
                tx_queue = q_vector->tx_ring->reg_idx;

        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
                if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                if (!adapter->msix_entries && msix_vector == 0)
                        msixbm |= E1000_EIMS_OTHER;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
                   Each queue has a single entry in the table to which we write
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
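                /*
                 * Resulting IVAR0[n & 0x7] byte layout, as implemented below
                 * (each byte holds a vector number ORed with
                 * E1000_IVAR_VALID):
                 *   byte 0: rx queue n        byte 1: tx queue n
                 *   byte 2: rx queue n + 8    byte 3: tx queue n + 8
                 */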
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue < 8) {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        } else {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue < 8) {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        } else {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        case e1000_82580:
                /* 82580 uses the same table-based approach as the 82576, but
                   has fewer entries; as a result, assignments carry over for
                   queues greater than 4. */
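                /*
                 * Concretely: queue n maps to IVAR0[n >> 1], so each entry
                 * serves queues 2n and 2n + 1 -- even queues use bytes
                 * 0 (rx) / 1 (tx), odd queues bytes 2 (rx) / 3 (tx).
                 */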
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue & 0x1) {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        } else {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue >> 1);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue & 0x1) {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        } else {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
                break;
        }
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;

        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support */
                tmp |= E1000_CTRL_EXT_PBA_CLR;

                /* Auto-Mask interrupts upon ICR read. */
                tmp |= E1000_CTRL_EXT_EIAME;
                tmp |= E1000_CTRL_EXT_IRCA;

                wr32(E1000_CTRL_EXT, tmp);

                /* enable msix_other interrupt */
                array_wr32(E1000_MSIXBM(0), vector++,
                           E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;

                break;

        case e1000_82576:
        case e1000_82580:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
                                 E1000_GPIE_NSICR);

                /* enable msix_other interrupt */
                adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;

                wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */

        adapter->eims_enable_mask |= adapter->eims_other;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                igb_assign_vector(q_vector, vector++);
                adapter->eims_enable_mask |= q_vector->eims_value;
        }

        wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;

        err = request_irq(adapter->msix_entries[vector].vector,
                          igb_msix_other, 0, netdev->name, adapter);
        if (err)
                goto out;
        vector++;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];

                q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

                if (q_vector->rx_ring && q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else if (q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                                q_vector->tx_ring->queue_index);
                else if (q_vector->rx_ring)
                        sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else
                        sprintf(q_vector->name, "%s-unused", netdev->name);

                err = request_irq(adapter->msix_entries[vector].vector,
                                  igb_msix_ring, 0, q_vector->name,
                                  q_vector);
                if (err)
                        goto out;
                vector++;
        }

        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                pci_disable_msi(adapter->pdev);
        }
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
                adapter->q_vector[v_idx] = NULL;
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
        adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
        igb_free_queues(adapter);
        igb_free_q_vectors(adapter);
        igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

        /* Number of supported queues. */
        adapter->num_rx_queues = adapter->rss_queues;
        adapter->num_tx_queues = adapter->rss_queues;

        /* start with one vector for every rx queue */
        numvecs = adapter->num_rx_queues;

        /* if tx handler is separate, add 1 for every tx queue */
        if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
                numvecs += adapter->num_tx_queues;

        /* store the number of vectors reserved for queues */
        adapter->num_q_vectors = numvecs;

        /* add 1 vector for link status interrupts */
        numvecs++;
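
        /*
         * Worked example (hypothetical, 4 RSS queues): without
         * IGB_FLAG_QUEUE_PAIRS this requests 4 rx + 4 tx + 1 link vector
         * = 9 MSI-X vectors; with queue pairs it is 4 + 1 = 5.
         */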
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev,
                              adapter->msix_entries,
                              numvecs);
        if (err == 0)
                goto out;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
        /* disable SR-IOV for non MSI-X configurations */
        if (adapter->vf_data) {
                struct e1000_hw *hw = &adapter->hw;
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(adapter->pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&adapter->pdev->dev, "IOV Disabled\n");
        }
#endif
        adapter->vfs_allocated_count = 0;
        adapter->rss_queues = 1;
        adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
out:
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
        return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
                q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
                q_vector->itr_val = IGB_START_ITR;
                q_vector->set_itr = 1;
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
        return 0;

err_out:
        while (v_idx) {
                v_idx--;
                q_vector = adapter->q_vector[v_idx];
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[v_idx] = NULL;
        }
        return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector;

        q_vector = adapter->q_vector[v_idx];
        q_vector->rx_ring = &adapter->rx_ring[ring_idx];
        q_vector->rx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->rx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector;

        q_vector = adapter->q_vector[v_idx];
        q_vector->tx_ring = &adapter->tx_ring[ring_idx];
        q_vector->tx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->tx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
        int i;
        int v_idx = 0;

        if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
            (adapter->num_q_vectors < adapter->num_tx_queues))
                return -ENOMEM;

        if (adapter->num_q_vectors >=
            (adapter->num_rx_queues + adapter->num_tx_queues)) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        } else {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        if (i < adapter->num_tx_queues)
                                igb_map_tx_ring_to_vector(adapter, i, v_idx);
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                }
                for (; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        }
        return 0;
}
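
/*
 * Example of the sharing fallback above (hypothetical counts): with 4 rx
 * and 4 tx queues but only 4 q_vectors, vector i services both tx_ring[i]
 * and rx_ring[i]; with 8 or more q_vectors every ring gets its own vector.
 */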

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;

        igb_set_interrupt_capability(adapter);

        err = igb_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
                goto err_alloc_q_vectors;
        }

        err = igb_alloc_queues(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        err = igb_map_ring_to_vector(adapter);
        if (err) {
                dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
                goto err_map_queues;
        }

        return 0;
err_map_queues:
        igb_free_queues(adapter);
err_alloc_queues:
        igb_free_q_vectors(adapter);
err_alloc_q_vectors:
        igb_reset_interrupt_capability(adapter);
        return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err)
                        goto request_done;
                /* fall back to MSI */
                igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
                adapter->num_q_vectors = 1;
                err = igb_alloc_q_vectors(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for vectors\n");
                        goto request_done;
                }
                err = igb_alloc_queues(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for queues\n");
                        igb_free_q_vectors(adapter);
                        goto request_done;
                }
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
                igb_assign_vector(adapter->q_vector[0], 0);
        }

        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
                                  netdev->name, adapter);
                if (!err)
                        goto request_done;

                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }

        err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
                          netdev->name, adapter);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                int vector = 0, i;

                free_irq(adapter->msix_entries[vector++].vector, adapter);

                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct igb_q_vector *q_vector = adapter->q_vector[i];
                        free_irq(adapter->msix_entries[vector++].vector,
                                 q_vector);
                }
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /*
         * We need to be careful when disabling interrupts.  The VFs are also
         * mapped into these registers, so clearing the bits can cause issues
         * for the VF drivers; only clear what we ourselves set.
         */
        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
        }

        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
                u32 regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                if (adapter->vfs_allocated_count) {
                        wr32(E1000_MBVFIMR, 0xFF);
                        ims |= E1000_IMS_VMMB;
                }
                if (adapter->hw.mac.type == e1000_82580)
                        ims |= E1000_IMS_DRSTA;

                wr32(E1000_IMS, ims);
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK |
                                E1000_IMS_DRSTA);
                wr32(E1000_IAM, IMS_ENABLE_MASK |
                                E1000_IMS_DRSTA);
        }
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;

        if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                /* add VID to filter table */
                igb_vfta_set(hw, vid, true);
                adapter->mng_vlan_id = vid;
        } else {
                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
        }

        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
            (vid != old_vid) &&
            !vlan_group_get_device(adapter->vlgrp, old_vid)) {
                /* remove VID from filter table */
                igb_vfta_set(hw, old_vid, false);
        }
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_rx_mode(netdev);

        igb_restore_vlan(adapter);

        igb_setup_tctl(adapter);
        igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);

        igb_configure_tx(adapter);
        igb_configure_rx(adapter);

        igb_rx_fifo_flush_82575(&adapter->hw);

        /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }

        adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);
        else
                igb_assign_vector(adapter->q_vector[0], 0);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(adapter->netdev);

        /* start the watchdog. */
        hw->mac.get_link_status = 1;
        schedule_work(&adapter->watchdog_task);

        return 0;
}

void igb_down(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_stop_all_queues(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_disable(&q_vector->napi);
        }

        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);

        /* record the stats before reset */
        igb_update_stats(adapter);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        if (!pci_channel_offline(adapter->pdev))
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

        /* since we reset the hardware DCA settings were cleared */
        igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_fc_info *fc = &hw->fc;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 hwm;

        /* Repartition PBA for MTUs greater than 9k.  CTRL.RST is required
         * for the change to take effect.
         */
        switch (mac->type) {
        case e1000_82580:
                pba = rd32(E1000_RXPBS);
                pba = igb_rxpbs_adjust_82580(pba);
                break;
        case e1000_82576:
                pba = rd32(E1000_RXPBS);
                pba &= E1000_RXPBS_SIZE_MASK_82576;
                break;
        case e1000_82575:
        default:
                pba = E1000_PBA_34K;
                break;
        }

        if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
            (mac->type < e1000_82576)) {
                /* adjust PBA for jumbo frames */
                wr32(E1000_PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = rd32(E1000_PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* the tx fifo also stores 16 bytes of information about the tx
                 * but don't include ethernet FCS because hardware appends it */
                min_tx_space = (adapter->max_frame_size +
                                sizeof(union e1000_adv_tx_desc) -
                                ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                /* software strips receive CRC, so leave room for it */
                min_rx_space = adapter->max_frame_size;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;

                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* if short on rx space, rx wins and must trump tx
                         * adjustment */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
                wr32(E1000_PBA, pba);
        }

        /* flow control settings */
        /* The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, or
         * - the full Rx FIFO size minus one full frame */
        hwm = min(((pba << 10) * 9 / 10),
                  ((pba << 10) - 2 * adapter->max_frame_size));

        fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
        fc->low_water = fc->high_water - 16;
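
        /*
         * Worked example (hypothetical values), assuming E1000_PBA_34K
         * yields pba = 34 (KB) and a 1522 byte max frame:
         *   hwm        = min(34816 * 9 / 10, 34816 - 2 * 1522)
         *              = min(31334, 31772) = 31334
         *   high_water = 31334 & 0xFFF0 = 31328
         *   low_water  = 31328 - 16     = 31312
         */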
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
        fc->current_mode = fc->requested_mode;

        /* disable receive for all VFs and wait one second */
        if (adapter->vfs_allocated_count) {
                int i;
                for (i = 0 ; i < adapter->vfs_allocated_count; i++)
                        adapter->vf_data[i].flags = 0;

                /* ping all the active vfs to let them know we are going down */
                igb_ping_all_vfs(adapter);

                /* disable transmits and receives */
                wr32(E1000_VFRE, 0);
                wr32(E1000_VFTE, 0);
        }

        /* Allow time for pending master requests to run */
        hw->mac.ops.reset_hw(hw);
        wr32(E1000_WUC, 0);

        if (hw->mac.ops.init_hw(hw))
                dev_err(&pdev->dev, "Hardware Error\n");

        if (hw->mac.type == e1000_82580) {
                u32 reg = rd32(E1000_PCIEMISC);
                wr32(E1000_PCIEMISC,
                     reg & ~E1000_PCIEMISC_LX_DECISION);
        }
        igb_update_mng_vlan(adapter);

        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

        igb_reset_adaptive(hw);
        igb_get_phy_info(hw);
}

static const struct net_device_ops igb_netdev_ops = {
        .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
        .ndo_start_xmit         = igb_xmit_frame_adv,
        .ndo_get_stats          = igb_get_stats,
        .ndo_set_rx_mode        = igb_set_rx_mode,
        .ndo_set_multicast_list = igb_set_rx_mode,
        .ndo_set_mac_address    = igb_set_mac,
        .ndo_change_mtu         = igb_change_mtu,
        .ndo_do_ioctl           = igb_ioctl,
        .ndo_tx_timeout         = igb_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = igb_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = igb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
        .ndo_set_vf_mac         = igb_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = igb_ndo_set_vf_vlan,
        .ndo_set_vf_tx_rate     = igb_ndo_set_vf_bw,
        .ndo_get_vf_config      = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct igb_adapter *adapter;
        struct e1000_hw *hw;
        u16 eeprom_data = 0;
        static int global_quad_port_a; /* global quad port a indication */
        const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
        unsigned long mmio_start, mmio_len;
        int err, pci_using_dac;
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;

        err = pci_enable_device_mem(pdev);
        if (err)
                return err;

        pci_using_dac = 0;
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (!err)
                        pci_using_dac = 1;
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
                                goto err_dma;
                        }
                }
        }

        err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
                                           IORESOURCE_MEM),
                                           igb_driver_name);
        if (err)
                goto err_pci_reg;

        pci_enable_pcie_error_reporting(pdev);

        pci_set_master(pdev);
        pci_save_state(pdev);

        err = -ENOMEM;
        netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
                                   IGB_ABS_MAX_TX_QUEUES);
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);

        err = -EIO;
        hw->hw_addr = ioremap(mmio_start, mmio_len);
        if (!hw->hw_addr)
                goto err_ioremap;

        netdev->netdev_ops = &igb_netdev_ops;
        igb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len;

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;

        /* Copy the default MAC, PHY and NVM function pointers */
        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
        memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
        /* Initialize skew-specific constants */
        err = ei->get_invariants(hw);
        if (err)
                goto err_sw_init;

        /* setup the private structure */
        err = igb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        igb_get_bus_info_pcie(hw);

        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = AUTO_ALL_MODES;
                hw->phy.disable_polarity_correction = false;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        if (igb_check_reset_block(hw))
                dev_info(&pdev->dev,
                         "PHY reset is blocked due to SOL/IDER session.\n");

        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;

        netdev->features |= NETIF_F_IPV6_CSUM;
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;
        netdev->features |= NETIF_F_GRO;

        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        if (hw->mac.type >= e1000_82576)
                netdev->features |= NETIF_F_SCTP_CSUM;

        adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

        /* before reading the NVM, reset the controller to put the device in a
         * known good starting state */
        hw->mac.ops.reset_hw(hw);

        /* make sure the NVM is good */
        if (igb_validate_nvm_checksum(hw) < 0) {
                dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        /* copy the MAC address out of the NVM */
        if (hw->mac.ops.read_mac_addr(hw))
                dev_err(&pdev->dev, "NVM Read Error\n");

        memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                dev_err(&pdev->dev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        setup_timer(&adapter->watchdog_timer, &igb_watchdog,
                    (unsigned long) adapter);
        setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
                    (unsigned long) adapter);

        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

        /* Initialize link properties that are user-changeable */
        adapter->fc_autoneg = true;
        hw->mac.autoneg = true;
        hw->phy.autoneg_advertised = 0x2f;

        hw->fc.requested_mode = e1000_fc_default;
        hw->fc.current_mode = e1000_fc_default;

        igb_validate_mdi_setting(hw);

        /* Initial Wake on LAN setting.  If APM wake is enabled in the
         * EEPROM, enable the ACPI Magic Packet filter.
         */

        if (hw->bus.func == 0)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
        else if (hw->mac.type == e1000_82580)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
                                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
                                 &eeprom_data);
        else if (hw->bus.func == 1)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;

        /* now that we have the eeprom settings, apply the special cases where
         * the eeprom may be wrong or the board simply won't support wake on
         * lan on a particular port */
        switch (pdev->device) {
        case E1000_DEV_ID_82575GB_QUAD_COPPER:
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82575EB_FIBER_SERDES:
        case E1000_DEV_ID_82576_FIBER:
        case E1000_DEV_ID_82576_SERDES:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82576_QUAD_COPPER:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
                else
                        adapter->flags |= IGB_FLAG_QUAD_PORT_A;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
                break;
        }

        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

        /* reset the hardware with the new settings */
        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
        if (dca_add_requester(&pdev->dev) == 0) {
                adapter->flags |= IGB_FLAG_DCA_ENABLED;
                dev_info(&pdev->dev, "DCA enabled\n");
                igb_setup_dca(adapter);
        }

#endif
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
                 netdev->name,
                 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
                  "unknown"),
                 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
                  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
                  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
                  "unknown"),
                 netdev->dev_addr);

        igb_read_part_num(hw, &part_num);
        dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
                 (part_num >> 8), (part_num & 0xff));

        dev_info(&pdev->dev,
                 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
                 adapter->msix_entries ? "MSI-X" :
                 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
                 adapter->num_rx_queues, adapter->num_tx_queues);

        return 0;

err_register:
        igb_release_hw_control(adapter);
err_eeprom:
        if (!igb_check_reset_block(hw))
                igb_reset_phy(hw);

        if (hw->flash_address)
                iounmap(hw->flash_address);
err_sw_init:
        igb_clear_interrupt_scheme(adapter);
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        /* flush_scheduled_work() may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled */
        set_bit(__IGB_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
        if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                dev_info(&pdev->dev, "DCA disabled\n");
                dca_remove_requester(&pdev->dev);
                adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
                wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
        }
#endif

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        unregister_netdev(netdev);

        if (!igb_check_reset_block(hw))
                igb_reset_phy(hw);

        igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
        /* reclaim resources allocated to VFs */
        if (adapter->vf_data) {
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&pdev->dev, "IOV Disabled\n");
        }
#endif

        iounmap(hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));

        free_netdev(netdev);

        pci_disable_pcie_error_reporting(pdev);

        pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is because it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
1759 * the memory for the VFs.
1760 **/
1761 static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1762 {
1763 #ifdef CONFIG_PCI_IOV
1764 struct pci_dev *pdev = adapter->pdev;
1765
1766 if (adapter->vfs_allocated_count > 7)
1767 adapter->vfs_allocated_count = 7;
1768
1769 if (adapter->vfs_allocated_count) {
1770 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1771 sizeof(struct vf_data_storage),
1772 GFP_KERNEL);
1773 /* if allocation failed then we do not support SR-IOV */
1774 if (!adapter->vf_data) {
1775 adapter->vfs_allocated_count = 0;
1776 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1777 "Data Storage\n");
1778 }
1779 }
1780
1781 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1782 kfree(adapter->vf_data);
1783 adapter->vf_data = NULL;
1784 #endif /* CONFIG_PCI_IOV */
1785 adapter->vfs_allocated_count = 0;
1786 #ifdef CONFIG_PCI_IOV
1787 } else {
1788 unsigned char mac_addr[ETH_ALEN];
1789 int i;
1790 dev_info(&pdev->dev, "%d vfs allocated\n",
1791 adapter->vfs_allocated_count);
1792 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1793 random_ether_addr(mac_addr);
1794 igb_set_vf_mac(adapter, i, mac_addr);
1795 }
1796 }
1797 #endif /* CONFIG_PCI_IOV */
1798 }
1799
1800
1801 /**
1802 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1803 * @adapter: board private structure to initialize
1804 *
1805 * igb_init_hw_timer initializes the function pointers and values for the
1806 * hardware timer.
1807 **/
1808 static void igb_init_hw_timer(struct igb_adapter *adapter)
1809 {
1810 struct e1000_hw *hw = &adapter->hw;
1811
1812 switch (hw->mac.type) {
1813 case e1000_82580:
1814 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1815 adapter->cycles.read = igb_read_clock;
1816 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1817 adapter->cycles.mult = 1;
1818 /*
1819 * The 82580 timesync updates the system timer in 8ns increments
1820 * and the increment value cannot be shifted. Instead we need to
1821 * shift the registers to generate a 64bit timer value. As a result
1822 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
1823 * 24 in order to generate a larger value for synchronization.
1824 */
1825 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
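/*
 * Illustrative arithmetic (a sketch, not driver code): the generic
 * timecounter converts a cycle delta to nanoseconds as
 *
 *   ns = (cycles * mult) >> shift
 *
 * With mult = 1 and shift = 24, the low 24 bits of each cycle value
 * carry fractional nanoseconds, which is why the igb_read_clock path
 * reads the timestamp registers shifted left by 24 as described in
 * the comment above.
 */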
1826 /* disable system timer temporarily by setting bit 31 */
1827 wr32(E1000_TSAUXC, 0x80000000);
1828 wrfl();
1829
1830 /* Set registers so that rollover occurs soon to test this. */
1831 wr32(E1000_SYSTIMR, 0x00000000);
1832 wr32(E1000_SYSTIML, 0x80000000);
1833 wr32(E1000_SYSTIMH, 0x000000FF);
1834 wrfl();
1835
1836 /* enable system timer by clearing bit 31 */
1837 wr32(E1000_TSAUXC, 0x0);
1838 wrfl();
1839
1840 timecounter_init(&adapter->clock,
1841 &adapter->cycles,
1842 ktime_to_ns(ktime_get_real()));
1843 /*
1844 * Synchronize our NIC clock against the system wall clock. NIC
1845 * time stamp reading requires ~3us per sample, and each sample
1846 * proved stable even under load, so only 10 samples are needed
1847 * for each offset comparison.
1848 */
1849 memset(&adapter->compare, 0, sizeof(adapter->compare));
1850 adapter->compare.source = &adapter->clock;
1851 adapter->compare.target = ktime_get_real;
1852 adapter->compare.num_samples = 10;
1853 timecompare_update(&adapter->compare, 0);
1854 break;
1855 case e1000_82576:
1856 /*
1857 * Initialize the hardware timer: we keep it running in case
1858 * some program needs it later on.
1859 */
1860 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1861 adapter->cycles.read = igb_read_clock;
1862 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1863 adapter->cycles.mult = 1;
1864 /*
1865 * Scale the NIC clock cycle by a large factor so that
1866 * relatively small clock corrections can be added or
1867 * subtracted at each clock tick. The drawbacks of a large
1868 * factor are a) that the clock register overflows more quickly
1869 * (not such a big deal) and b) that the increment per tick has
1870 * to fit into 24 bits. As a result we need to use a shift of
1871 * 19 so we can fit a value of 16 into the TIMINCA register.
1872 */
1873 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1874 wr32(E1000_TIMINCA,
1875 (1 << E1000_TIMINCA_16NS_SHIFT) |
1876 (16 << IGB_82576_TSYNC_SHIFT));
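/*
 * Worked arithmetic (a sketch, assuming the upper TIMINCA field
 * programs one 16ns increment period): the increment written above is
 * 16 << 19 = 0x800000, which just fits in a 24-bit increment field.
 * Every 16ns the counter then advances by 16 << 19, so with shift = 19
 *
 *   ns = (cycles * 1) >> 19
 *
 * advances 16 per tick and tracks wall time in nanoseconds.
 */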
1877
1878 /* Set registers so that rollover occurs soon to test this. */
1879 wr32(E1000_SYSTIML, 0x00000000);
1880 wr32(E1000_SYSTIMH, 0xFF800000);
1881 wrfl();
1882
1883 timecounter_init(&adapter->clock,
1884 &adapter->cycles,
1885 ktime_to_ns(ktime_get_real()));
1886 /*
1887 * Synchronize our NIC clock against the system wall clock. NIC
1888 * time stamp reading requires ~3us per sample, and each sample
1889 * proved stable even under load, so only 10 samples are needed
1890 * for each offset comparison.
1891 */
1892 memset(&adapter->compare, 0, sizeof(adapter->compare));
1893 adapter->compare.source = &adapter->clock;
1894 adapter->compare.target = ktime_get_real;
1895 adapter->compare.num_samples = 10;
1896 timecompare_update(&adapter->compare, 0);
1897 break;
1898 case e1000_82575:
1899 /* 82575 does not support timesync */
1900 default:
1901 break;
1902 }
1903
1904 }
1905
1906 /**
1907 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1908 * @adapter: board private structure to initialize
1909 *
1910 * igb_sw_init initializes the Adapter private data structure.
1911 * Fields are initialized based on PCI device information and
1912 * OS network device settings (MTU size).
1913 **/
1914 static int __devinit igb_sw_init(struct igb_adapter *adapter)
1915 {
1916 struct e1000_hw *hw = &adapter->hw;
1917 struct net_device *netdev = adapter->netdev;
1918 struct pci_dev *pdev = adapter->pdev;
1919
1920 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1921
1922 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1923 adapter->rx_ring_count = IGB_DEFAULT_RXD;
1924 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1925 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1926
1927 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1928 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1929
1930 #ifdef CONFIG_PCI_IOV
1931 if (hw->mac.type == e1000_82576)
1932 adapter->vfs_allocated_count = max_vfs;
1933
1934 #endif /* CONFIG_PCI_IOV */
1935 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1936
1937 /*
1938 * If rss_queues > 4, or if more than one RSS queue is in use while
1939 * most of the pools (> 6) are assigned to VFs, combine the queues
1940 * into queue pairs to conserve the limited supply of interrupt vectors.
1941 */
1942 if ((adapter->rss_queues > 4) ||
1943 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1944 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
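/*
 * Illustrative budget (a sketch; the exact MSI-X vector count is
 * hardware dependent): unpaired, 8 RSS queues cost 8 Tx + 8 Rx + 1
 * "other" = 17 vectors; paired, each q_vector services one Tx/Rx
 * ring pair, so the same 8 queues fit in 8 + 1 = 9 vectors.
 */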
1945
1946 /* This call may decrease the number of queues */
1947 if (igb_init_interrupt_scheme(adapter)) {
1948 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1949 return -ENOMEM;
1950 }
1951
1952 igb_init_hw_timer(adapter);
1953 igb_probe_vfs(adapter);
1954
1955 /* Explicitly disable IRQ since the NIC can be in any state. */
1956 igb_irq_disable(adapter);
1957
1958 set_bit(__IGB_DOWN, &adapter->state);
1959 return 0;
1960 }
1961
1962 /**
1963 * igb_open - Called when a network interface is made active
1964 * @netdev: network interface device structure
1965 *
1966 * Returns 0 on success, negative value on failure
1967 *
1968 * The open entry point is called when a network interface is made
1969 * active by the system (IFF_UP). At this point all resources needed
1970 * for transmit and receive operations are allocated, the interrupt
1971 * handler is registered with the OS, the watchdog timer is started,
1972 * and the stack is notified that the interface is ready.
1973 **/
1974 static int igb_open(struct net_device *netdev)
1975 {
1976 struct igb_adapter *adapter = netdev_priv(netdev);
1977 struct e1000_hw *hw = &adapter->hw;
1978 int err;
1979 int i;
1980
1981 /* disallow open during test */
1982 if (test_bit(__IGB_TESTING, &adapter->state))
1983 return -EBUSY;
1984
1985 netif_carrier_off(netdev);
1986
1987 /* allocate transmit descriptors */
1988 err = igb_setup_all_tx_resources(adapter);
1989 if (err)
1990 goto err_setup_tx;
1991
1992 /* allocate receive descriptors */
1993 err = igb_setup_all_rx_resources(adapter);
1994 if (err)
1995 goto err_setup_rx;
1996
1997 /* e1000_power_up_phy(adapter); */
1998
1999 /* before we allocate an interrupt, we must be ready to handle it.
2000 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2001 * as soon as we call request_irq, so we have to set up our
2002 * clean_rx handler before we do so. */
2003 igb_configure(adapter);
2004
2005 err = igb_request_irq(adapter);
2006 if (err)
2007 goto err_req_irq;
2008
2009 /* From here on the code is the same as igb_up() */
2010 clear_bit(__IGB_DOWN, &adapter->state);
2011
2012 for (i = 0; i < adapter->num_q_vectors; i++) {
2013 struct igb_q_vector *q_vector = adapter->q_vector[i];
2014 napi_enable(&q_vector->napi);
2015 }
2016
2017 /* Clear any pending interrupts. */
2018 rd32(E1000_ICR);
2019
2020 igb_irq_enable(adapter);
2021
2022 /* notify VFs that reset has been completed */
2023 if (adapter->vfs_allocated_count) {
2024 u32 reg_data = rd32(E1000_CTRL_EXT);
2025 reg_data |= E1000_CTRL_EXT_PFRSTD;
2026 wr32(E1000_CTRL_EXT, reg_data);
2027 }
2028
2029 netif_tx_start_all_queues(netdev);
2030
2031 /* start the watchdog. */
2032 hw->mac.get_link_status = 1;
2033 schedule_work(&adapter->watchdog_task);
2034
2035 return 0;
2036
2037 err_req_irq:
2038 igb_release_hw_control(adapter);
2039 /* e1000_power_down_phy(adapter); */
2040 igb_free_all_rx_resources(adapter);
2041 err_setup_rx:
2042 igb_free_all_tx_resources(adapter);
2043 err_setup_tx:
2044 igb_reset(adapter);
2045
2046 return err;
2047 }
2048
2049 /**
2050 * igb_close - Disables a network interface
2051 * @netdev: network interface device structure
2052 *
2053 * Returns 0, this is not allowed to fail
2054 *
2055 * The close entry point is called when an interface is de-activated
2056 * by the OS. The hardware is still under the driver's control, but
2057 * needs to be disabled. A global MAC reset is issued to stop the
2058 * hardware, and all transmit and receive resources are freed.
2059 **/
2060 static int igb_close(struct net_device *netdev)
2061 {
2062 struct igb_adapter *adapter = netdev_priv(netdev);
2063
2064 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2065 igb_down(adapter);
2066
2067 igb_free_irq(adapter);
2068
2069 igb_free_all_tx_resources(adapter);
2070 igb_free_all_rx_resources(adapter);
2071
2072 return 0;
2073 }
2074
2075 /**
2076 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2077 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2078 *
2079 * Return 0 on success, negative on failure
2080 **/
2081 int igb_setup_tx_resources(struct igb_ring *tx_ring)
2082 {
2083 struct pci_dev *pdev = tx_ring->pdev;
2084 int size;
2085
2086 size = sizeof(struct igb_buffer) * tx_ring->count;
2087 tx_ring->buffer_info = vmalloc(size);
2088 if (!tx_ring->buffer_info)
2089 goto err;
2090 memset(tx_ring->buffer_info, 0, size);
2091
2092 /* round up to nearest 4K */
2093 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2094 tx_ring->size = ALIGN(tx_ring->size, 4096);
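/*
 * Worked example (a sketch, assuming the IGB_DEFAULT_TXD default of
 * 256 descriptors): 256 * sizeof(union e1000_adv_tx_desc) =
 * 256 * 16 = 4096 bytes, which is already 4K aligned; a 320-entry
 * ring (5120 bytes) would round up to 8192.
 */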
2095
2096 tx_ring->desc = pci_alloc_consistent(pdev,
2097 tx_ring->size,
2098 &tx_ring->dma);
2099
2100 if (!tx_ring->desc)
2101 goto err;
2102
2103 tx_ring->next_to_use = 0;
2104 tx_ring->next_to_clean = 0;
2105 return 0;
2106
2107 err:
2108 vfree(tx_ring->buffer_info);
2109 dev_err(&pdev->dev,
2110 "Unable to allocate memory for the transmit descriptor ring\n");
2111 return -ENOMEM;
2112 }
2113
2114 /**
2115 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2116 * (Descriptors) for all queues
2117 * @adapter: board private structure
2118 *
2119 * Return 0 on success, negative on failure
2120 **/
2121 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2122 {
2123 struct pci_dev *pdev = adapter->pdev;
2124 int i, err = 0;
2125
2126 for (i = 0; i < adapter->num_tx_queues; i++) {
2127 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
2128 if (err) {
2129 dev_err(&pdev->dev,
2130 "Allocation for Tx Queue %u failed\n", i);
2131 for (i--; i >= 0; i--)
2132 igb_free_tx_resources(&adapter->tx_ring[i]);
2133 break;
2134 }
2135 }
2136
2137 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
2138 int r_idx = i % adapter->num_tx_queues;
2139 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
2140 }
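/*
 * Mapping sketch: with num_tx_queues = 4 the modulo above folds the
 * table indices onto rings 0,1,2,3,0,1,2,3,... so every entry of
 * multi_tx_table resolves to a real ring.
 */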
2141 return err;
2142 }
2143
2144 /**
2145 * igb_setup_tctl - configure the transmit control registers
2146 * @adapter: Board private structure
2147 **/
2148 void igb_setup_tctl(struct igb_adapter *adapter)
2149 {
2150 struct e1000_hw *hw = &adapter->hw;
2151 u32 tctl;
2152
2153 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2154 wr32(E1000_TXDCTL(0), 0);
2155
2156 /* Program the Transmit Control Register */
2157 tctl = rd32(E1000_TCTL);
2158 tctl &= ~E1000_TCTL_CT;
2159 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2160 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2161
2162 igb_config_collision_dist(hw);
2163
2164 /* Enable transmits */
2165 tctl |= E1000_TCTL_EN;
2166
2167 wr32(E1000_TCTL, tctl);
2168 }
2169
2170 /**
2171 * igb_configure_tx_ring - Configure transmit ring after Reset
2172 * @adapter: board private structure
2173 * @ring: tx ring to configure
2174 *
2175 * Configure a transmit ring after a reset.
2176 **/
2177 void igb_configure_tx_ring(struct igb_adapter *adapter,
2178 struct igb_ring *ring)
2179 {
2180 struct e1000_hw *hw = &adapter->hw;
2181 u32 txdctl;
2182 u64 tdba = ring->dma;
2183 int reg_idx = ring->reg_idx;
2184
2185 /* disable the queue */
2186 txdctl = rd32(E1000_TXDCTL(reg_idx));
2187 wr32(E1000_TXDCTL(reg_idx),
2188 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2189 wrfl();
2190 mdelay(10);
2191
2192 wr32(E1000_TDLEN(reg_idx),
2193 ring->count * sizeof(union e1000_adv_tx_desc));
2194 wr32(E1000_TDBAL(reg_idx),
2195 tdba & 0x00000000ffffffffULL);
2196 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2197
2198 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2199 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2200 writel(0, ring->head);
2201 writel(0, ring->tail);
2202
2203 txdctl |= IGB_TX_PTHRESH;
2204 txdctl |= IGB_TX_HTHRESH << 8;
2205 txdctl |= IGB_TX_WTHRESH << 16;
2206
2207 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2208 wr32(E1000_TXDCTL(reg_idx), txdctl);
2209 }
2210
2211 /**
2212 * igb_configure_tx - Configure transmit Unit after Reset
2213 * @adapter: board private structure
2214 *
2215 * Configure the Tx unit of the MAC after a reset.
2216 **/
2217 static void igb_configure_tx(struct igb_adapter *adapter)
2218 {
2219 int i;
2220
2221 for (i = 0; i < adapter->num_tx_queues; i++)
2222 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2223 }
2224
2225 /**
2226 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2227 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2228 *
2229 * Returns 0 on success, negative on failure
2230 **/
2231 int igb_setup_rx_resources(struct igb_ring *rx_ring)
2232 {
2233 struct pci_dev *pdev = rx_ring->pdev;
2234 int size, desc_len;
2235
2236 size = sizeof(struct igb_buffer) * rx_ring->count;
2237 rx_ring->buffer_info = vmalloc(size);
2238 if (!rx_ring->buffer_info)
2239 goto err;
2240 memset(rx_ring->buffer_info, 0, size);
2241
2242 desc_len = sizeof(union e1000_adv_rx_desc);
2243
2244 /* Round up to nearest 4K */
2245 rx_ring->size = rx_ring->count * desc_len;
2246 rx_ring->size = ALIGN(rx_ring->size, 4096);
2247
2248 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2249 &rx_ring->dma);
2250
2251 if (!rx_ring->desc)
2252 goto err;
2253
2254 rx_ring->next_to_clean = 0;
2255 rx_ring->next_to_use = 0;
2256
2257 return 0;
2258
2259 err:
2260 vfree(rx_ring->buffer_info);
2261 rx_ring->buffer_info = NULL;
2262 dev_err(&pdev->dev, "Unable to allocate memory for "
2263 "the receive descriptor ring\n");
2264 return -ENOMEM;
2265 }
2266
2267 /**
2268 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2269 * (Descriptors) for all queues
2270 * @adapter: board private structure
2271 *
2272 * Return 0 on success, negative on failure
2273 **/
2274 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2275 {
2276 struct pci_dev *pdev = adapter->pdev;
2277 int i, err = 0;
2278
2279 for (i = 0; i < adapter->num_rx_queues; i++) {
2280 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
2281 if (err) {
2282 dev_err(&pdev->dev,
2283 "Allocation for Rx Queue %u failed\n", i);
2284 for (i--; i >= 0; i--)
2285 igb_free_rx_resources(&adapter->rx_ring[i]);
2286 break;
2287 }
2288 }
2289
2290 return err;
2291 }
2292
2293 /**
2294 * igb_setup_mrqc - configure the multiple receive queue control registers
2295 * @adapter: Board private structure
2296 **/
2297 static void igb_setup_mrqc(struct igb_adapter *adapter)
2298 {
2299 struct e1000_hw *hw = &adapter->hw;
2300 u32 mrqc, rxcsum;
2301 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2302 union e1000_reta {
2303 u32 dword;
2304 u8 bytes[4];
2305 } reta;
2306 static const u8 rsshash[40] = {
2307 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2308 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2309 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2310 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2311
2312 /* Fill out hash function seeds */
2313 for (j = 0; j < 10; j++) {
2314 u32 rsskey = rsshash[(j * 4)];
2315 rsskey |= rsshash[(j * 4) + 1] << 8;
2316 rsskey |= rsshash[(j * 4) + 2] << 16;
2317 rsskey |= rsshash[(j * 4) + 3] << 24;
2318 array_wr32(E1000_RSSRK(0), j, rsskey);
2319 }
2320
2321 num_rx_queues = adapter->rss_queues;
2322
2323 if (adapter->vfs_allocated_count) {
2324 /* the 82575 and 82576 support 2 RSS queues for VMDq; the 82580 uses 1 */
2325 switch (hw->mac.type) {
2326 case e1000_82580:
2327 num_rx_queues = 1;
2328 shift = 0;
2329 break;
2330 case e1000_82576:
2331 shift = 3;
2332 num_rx_queues = 2;
2333 break;
2334 case e1000_82575:
2335 shift = 2;
2336 shift2 = 6;
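/* fall through */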
2337 default:
2338 break;
2339 }
2340 } else {
2341 if (hw->mac.type == e1000_82575)
2342 shift = 6;
2343 }
2344
2345 for (j = 0; j < (32 * 4); j++) {
2346 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2347 if (shift2)
2348 reta.bytes[j & 3] |= num_rx_queues << shift2;
2349 if ((j & 3) == 3)
2350 wr32(E1000_RETA(j >> 2), reta.dword);
2351 }
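/*
 * Worked example (sketch): on an 82576 with VFs enabled we have
 * num_rx_queues = 2 and shift = 3, so the redirection table bytes
 * alternate 0x00, 0x08, 0x00, 0x08, ... steering flows between the
 * two RSS queues of the PF's pool.
 */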
2352
2353 /*
2354 * Disable raw packet checksumming so that RSS hash is placed in
2355 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2356 * offloads as they are enabled by default
2357 */
2358 rxcsum = rd32(E1000_RXCSUM);
2359 rxcsum |= E1000_RXCSUM_PCSD;
2360
2361 if (adapter->hw.mac.type >= e1000_82576)
2362 /* Enable Receive Checksum Offload for SCTP */
2363 rxcsum |= E1000_RXCSUM_CRCOFL;
2364
2365 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2366 wr32(E1000_RXCSUM, rxcsum);
2367
2368 /* If VMDq is enabled then we set the appropriate mode for that, else
2369 * we default to RSS so that an RSS hash is calculated per packet even
2370 * if we are only using one queue */
2371 if (adapter->vfs_allocated_count) {
2372 if (hw->mac.type > e1000_82575) {
2373 /* Set the default pool for the PF's first queue */
2374 u32 vtctl = rd32(E1000_VT_CTL);
2375 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2376 E1000_VT_CTL_DISABLE_DEF_POOL);
2377 vtctl |= adapter->vfs_allocated_count <<
2378 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2379 wr32(E1000_VT_CTL, vtctl);
2380 }
2381 if (adapter->rss_queues > 1)
2382 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2383 else
2384 mrqc = E1000_MRQC_ENABLE_VMDQ;
2385 } else {
2386 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2387 }
2388 igb_vmm_control(adapter);
2389
2390 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2391 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2392 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2393 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2394 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2395 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2396 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2397 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2398
2399 wr32(E1000_MRQC, mrqc);
2400 }
2401
2402 /**
2403 * igb_setup_rctl - configure the receive control registers
2404 * @adapter: Board private structure
2405 **/
2406 void igb_setup_rctl(struct igb_adapter *adapter)
2407 {
2408 struct e1000_hw *hw = &adapter->hw;
2409 u32 rctl;
2410
2411 rctl = rd32(E1000_RCTL);
2412
2413 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2414 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
2415
2416 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
2417 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2418
2419 /*
2420 * enable stripping of CRC. It's unlikely this will break BMC
2421 * redirection as it did with e1000. Newer features require
2422 * that the HW strips the CRC.
2423 */
2424 rctl |= E1000_RCTL_SECRC;
2425
2426 /* disable store bad packets and clear size bits. */
2427 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2428
2429 /* enable LPE to prevent packets larger than max_frame_size */
2430 rctl |= E1000_RCTL_LPE;
2431
2432 /* disable queue 0 to prevent tail write w/o re-config */
2433 wr32(E1000_RXDCTL(0), 0);
2434
2435 /* Attention!!! For SR-IOV PF driver operations you must enable
2436 * queue drop for all VF and PF queues to prevent head-of-line blocking
2437 * if an untrusted VF does not provide descriptors to hardware.
2438 */
2439 if (adapter->vfs_allocated_count) {
2440 /* set all queue drop enable bits */
2441 wr32(E1000_QDE, ALL_QUEUES);
2442 }
2443
2444 wr32(E1000_RCTL, rctl);
2445 }
2446
2447 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2448 int vfn)
2449 {
2450 struct e1000_hw *hw = &adapter->hw;
2451 u32 vmolr;
2452
2453 /* if this is a VF rather than the PF, and that VF has VLANs
2454 * enabled, increase the size to allow for a VLAN tag */
2455 if (vfn < adapter->vfs_allocated_count &&
2456 adapter->vf_data[vfn].vlans_enabled)
2457 size += VLAN_TAG_SIZE;
2458
2459 vmolr = rd32(E1000_VMOLR(vfn));
2460 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2461 vmolr |= size | E1000_VMOLR_LPE;
2462 wr32(E1000_VMOLR(vfn), vmolr);
2463
2464 return 0;
2465 }
2466
2467 /**
2468 * igb_rlpml_set - set maximum receive packet size
2469 * @adapter: board private structure
2470 *
2471 * Configure maximum receivable packet size.
2472 **/
2473 static void igb_rlpml_set(struct igb_adapter *adapter)
2474 {
2475 u32 max_frame_size = adapter->max_frame_size;
2476 struct e1000_hw *hw = &adapter->hw;
2477 u16 pf_id = adapter->vfs_allocated_count;
2478
2479 if (adapter->vlgrp)
2480 max_frame_size += VLAN_TAG_SIZE;
2481
2482 /* if VFs are enabled, set the global RLPML to the largest possible
2483 * size and limit each pool through its VMOLR RLPML instead */
2484 if (pf_id) {
2485 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2486 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2487 }
2488
2489 wr32(E1000_RLPML, max_frame_size);
2490 }
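/*
 * Worked arithmetic for igb_rlpml_set above (sketch): with the default
 * 1500-byte MTU, max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4)
 * = 1518 bytes, and 1522 once a vlan group adds VLAN_TAG_SIZE.
 */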
2491
2492 static inline void igb_set_vmolr(struct igb_adapter *adapter,
2493 int vfn, bool aupe)
2494 {
2495 struct e1000_hw *hw = &adapter->hw;
2496 u32 vmolr;
2497
2498 /*
2499 * This register exists only on the 82576 and newer, so on older
2500 * parts exit and do nothing.
2501 */
2502 if (hw->mac.type < e1000_82576)
2503 return;
2504
2505 vmolr = rd32(E1000_VMOLR(vfn));
2506 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2507 if (aupe)
2508 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
2509 else
2510 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
2511
2512 /* clear all bits that might not be set */
2513 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2514
2515 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2516 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2517 /*
2518 * for VMDq only allow the VFs and the PF's default pool to accept
2519 * broadcast and multicast packets
2520 */
2521 if (vfn <= adapter->vfs_allocated_count)
2522 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2523
2524 wr32(E1000_VMOLR(vfn), vmolr);
2525 }
2526
2527 /**
2528 * igb_configure_rx_ring - Configure a receive ring after Reset
2529 * @adapter: board private structure
2530 * @ring: receive ring to be configured
2531 *
2532 * Configure the Rx unit of the MAC after a reset.
2533 **/
2534 void igb_configure_rx_ring(struct igb_adapter *adapter,
2535 struct igb_ring *ring)
2536 {
2537 struct e1000_hw *hw = &adapter->hw;
2538 u64 rdba = ring->dma;
2539 int reg_idx = ring->reg_idx;
2540 u32 srrctl, rxdctl;
2541
2542 /* disable the queue */
2543 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2544 wr32(E1000_RXDCTL(reg_idx),
2545 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2546
2547 /* Set DMA base address registers */
2548 wr32(E1000_RDBAL(reg_idx),
2549 rdba & 0x00000000ffffffffULL);
2550 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2551 wr32(E1000_RDLEN(reg_idx),
2552 ring->count * sizeof(union e1000_adv_rx_desc));
2553
2554 /* initialize head and tail */
2555 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2556 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2557 writel(0, ring->head);
2558 writel(0, ring->tail);
2559
2560 /* set descriptor configuration */
2561 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2562 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2563 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2564 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2565 srrctl |= IGB_RXBUFFER_16384 >>
2566 E1000_SRRCTL_BSIZEPKT_SHIFT;
2567 #else
2568 srrctl |= (PAGE_SIZE / 2) >>
2569 E1000_SRRCTL_BSIZEPKT_SHIFT;
2570 #endif
2571 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2572 } else {
2573 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2574 E1000_SRRCTL_BSIZEPKT_SHIFT;
2575 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2576 }
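/*
 * Units sketch (assuming the BSIZEPKT field counts 1KB blocks, i.e.
 * E1000_SRRCTL_BSIZEPKT_SHIFT == 10): a 2048-byte rx_buffer_len in
 * the one-buffer path above becomes ALIGN(2048, 1024) >> 10 = 2.
 */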
2577
2578 wr32(E1000_SRRCTL(reg_idx), srrctl);
2579
2580 /* set filtering for VMDQ pools */
2581 igb_set_vmolr(adapter, reg_idx & 0x7, true);
2582
2583 /* enable receive descriptor fetching */
2584 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2585 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2586 rxdctl &= 0xFFF00000;
2587 rxdctl |= IGB_RX_PTHRESH;
2588 rxdctl |= IGB_RX_HTHRESH << 8;
2589 rxdctl |= IGB_RX_WTHRESH << 16;
2590 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2591 }
2592
2593 /**
2594 * igb_configure_rx - Configure receive Unit after Reset
2595 * @adapter: board private structure
2596 *
2597 * Configure the Rx unit of the MAC after a reset.
2598 **/
2599 static void igb_configure_rx(struct igb_adapter *adapter)
2600 {
2601 int i;
2602
2603 /* set UTA to appropriate mode */
2604 igb_set_uta(adapter);
2605
2606 /* set the correct pool for the PF default MAC address in entry 0 */
2607 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2608 adapter->vfs_allocated_count);
2609
2610 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2611 * the Base and Length of the Rx Descriptor Ring */
2612 for (i = 0; i < adapter->num_rx_queues; i++)
2613 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2614 }
2615
2616 /**
2617 * igb_free_tx_resources - Free Tx Resources per Queue
2618 * @tx_ring: Tx descriptor ring for a specific queue
2619 *
2620 * Free all transmit software resources
2621 **/
2622 void igb_free_tx_resources(struct igb_ring *tx_ring)
2623 {
2624 igb_clean_tx_ring(tx_ring);
2625
2626 vfree(tx_ring->buffer_info);
2627 tx_ring->buffer_info = NULL;
2628
2629 /* if not set, then don't free */
2630 if (!tx_ring->desc)
2631 return;
2632
2633 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2634 tx_ring->desc, tx_ring->dma);
2635
2636 tx_ring->desc = NULL;
2637 }
2638
2639 /**
2640 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2641 * @adapter: board private structure
2642 *
2643 * Free all transmit software resources
2644 **/
2645 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2646 {
2647 int i;
2648
2649 for (i = 0; i < adapter->num_tx_queues; i++)
2650 igb_free_tx_resources(&adapter->tx_ring[i]);
2651 }
2652
2653 void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2654 struct igb_buffer *buffer_info)
2655 {
2656 if (buffer_info->dma) {
2657 if (buffer_info->mapped_as_page)
2658 pci_unmap_page(tx_ring->pdev,
2659 buffer_info->dma,
2660 buffer_info->length,
2661 PCI_DMA_TODEVICE);
2662 else
2663 pci_unmap_single(tx_ring->pdev,
2664 buffer_info->dma,
2665 buffer_info->length,
2666 PCI_DMA_TODEVICE);
2667 buffer_info->dma = 0;
2668 }
2669 if (buffer_info->skb) {
2670 dev_kfree_skb_any(buffer_info->skb);
2671 buffer_info->skb = NULL;
2672 }
2673 buffer_info->time_stamp = 0;
2674 buffer_info->length = 0;
2675 buffer_info->next_to_watch = 0;
2676 buffer_info->mapped_as_page = false;
2677 }
2678
2679 /**
2680 * igb_clean_tx_ring - Free Tx Buffers
2681 * @tx_ring: ring to be cleaned
2682 **/
2683 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2684 {
2685 struct igb_buffer *buffer_info;
2686 unsigned long size;
2687 unsigned int i;
2688
2689 if (!tx_ring->buffer_info)
2690 return;
2691 /* Free all the Tx ring sk_buffs */
2692
2693 for (i = 0; i < tx_ring->count; i++) {
2694 buffer_info = &tx_ring->buffer_info[i];
2695 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2696 }
2697
2698 size = sizeof(struct igb_buffer) * tx_ring->count;
2699 memset(tx_ring->buffer_info, 0, size);
2700
2701 /* Zero out the descriptor ring */
2702 memset(tx_ring->desc, 0, tx_ring->size);
2703
2704 tx_ring->next_to_use = 0;
2705 tx_ring->next_to_clean = 0;
2706 }
2707
2708 /**
2709 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2710 * @adapter: board private structure
2711 **/
2712 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2713 {
2714 int i;
2715
2716 for (i = 0; i < adapter->num_tx_queues; i++)
2717 igb_clean_tx_ring(&adapter->tx_ring[i]);
2718 }
2719
2720 /**
2721 * igb_free_rx_resources - Free Rx Resources
2722 * @rx_ring: ring to clean the resources from
2723 *
2724 * Free all receive software resources
2725 **/
2726 void igb_free_rx_resources(struct igb_ring *rx_ring)
2727 {
2728 igb_clean_rx_ring(rx_ring);
2729
2730 vfree(rx_ring->buffer_info);
2731 rx_ring->buffer_info = NULL;
2732
2733 /* if not set, then don't free */
2734 if (!rx_ring->desc)
2735 return;
2736
2737 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2738 rx_ring->desc, rx_ring->dma);
2739
2740 rx_ring->desc = NULL;
2741 }
2742
2743 /**
2744 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2745 * @adapter: board private structure
2746 *
2747 * Free all receive software resources
2748 **/
2749 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2750 {
2751 int i;
2752
2753 for (i = 0; i < adapter->num_rx_queues; i++)
2754 igb_free_rx_resources(&adapter->rx_ring[i]);
2755 }
2756
2757 /**
2758 * igb_clean_rx_ring - Free Rx Buffers per Queue
2759 * @rx_ring: ring to free buffers from
2760 **/
2761 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2762 {
2763 struct igb_buffer *buffer_info;
2764 unsigned long size;
2765 unsigned int i;
2766
2767 if (!rx_ring->buffer_info)
2768 return;
2769
2770 /* Free all the Rx ring sk_buffs */
2771 for (i = 0; i < rx_ring->count; i++) {
2772 buffer_info = &rx_ring->buffer_info[i];
2773 if (buffer_info->dma) {
2774 pci_unmap_single(rx_ring->pdev,
2775 buffer_info->dma,
2776 rx_ring->rx_buffer_len,
2777 PCI_DMA_FROMDEVICE);
2778 buffer_info->dma = 0;
2779 }
2780
2781 if (buffer_info->skb) {
2782 dev_kfree_skb(buffer_info->skb);
2783 buffer_info->skb = NULL;
2784 }
2785 if (buffer_info->page_dma) {
2786 pci_unmap_page(rx_ring->pdev,
2787 buffer_info->page_dma,
2788 PAGE_SIZE / 2,
2789 PCI_DMA_FROMDEVICE);
2790 buffer_info->page_dma = 0;
2791 }
2792 if (buffer_info->page) {
2793 put_page(buffer_info->page);
2794 buffer_info->page = NULL;
2795 buffer_info->page_offset = 0;
2796 }
2797 }
2798
2799 size = sizeof(struct igb_buffer) * rx_ring->count;
2800 memset(rx_ring->buffer_info, 0, size);
2801
2802 /* Zero out the descriptor ring */
2803 memset(rx_ring->desc, 0, rx_ring->size);
2804
2805 rx_ring->next_to_clean = 0;
2806 rx_ring->next_to_use = 0;
2807 }
2808
2809 /**
2810 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2811 * @adapter: board private structure
2812 **/
2813 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2814 {
2815 int i;
2816
2817 for (i = 0; i < adapter->num_rx_queues; i++)
2818 igb_clean_rx_ring(&adapter->rx_ring[i]);
2819 }
2820
2821 /**
2822 * igb_set_mac - Change the Ethernet Address of the NIC
2823 * @netdev: network interface device structure
2824 * @p: pointer to an address structure
2825 *
2826 * Returns 0 on success, negative on failure
2827 **/
2828 static int igb_set_mac(struct net_device *netdev, void *p)
2829 {
2830 struct igb_adapter *adapter = netdev_priv(netdev);
2831 struct e1000_hw *hw = &adapter->hw;
2832 struct sockaddr *addr = p;
2833
2834 if (!is_valid_ether_addr(addr->sa_data))
2835 return -EADDRNOTAVAIL;
2836
2837 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2838 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2839
2840 /* set the correct pool for the new PF MAC address in entry 0 */
2841 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2842 adapter->vfs_allocated_count);
2843
2844 return 0;
2845 }
2846
2847 /**
2848 * igb_write_mc_addr_list - write multicast addresses to MTA
2849 * @netdev: network interface device structure
2850 *
2851 * Writes multicast address list to the MTA hash table.
2852 * Returns: -ENOMEM on failure
2853 * 0 on no addresses written
2854 * X on writing X addresses to MTA
2855 **/
2856 static int igb_write_mc_addr_list(struct net_device *netdev)
2857 {
2858 struct igb_adapter *adapter = netdev_priv(netdev);
2859 struct e1000_hw *hw = &adapter->hw;
2860 struct dev_mc_list *mc_ptr = netdev->mc_list;
2861 u8 *mta_list;
2862 u32 vmolr = 0;
2863 int i;
2864
2865 if (netdev_mc_empty(netdev)) {
2866 /* nothing to program, so clear mc list */
2867 igb_update_mc_addr_list(hw, NULL, 0);
2868 igb_restore_vf_multicasts(adapter);
2869 return 0;
2870 }
2871
2872 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
2873 if (!mta_list)
2874 return -ENOMEM;
2875
2876 /* set vmolr receive overflow multicast bit */
2877 vmolr |= E1000_VMOLR_ROMPE;
2878
2879 /* The shared function expects a packed array of only addresses. */
2880 mc_ptr = netdev->mc_list;
2881
2882 for (i = 0; i < netdev_mc_count(netdev); i++) {
2883 if (!mc_ptr)
2884 break;
2885 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2886 mc_ptr = mc_ptr->next;
2887 }
2888 igb_update_mc_addr_list(hw, mta_list, i);
2889 kfree(mta_list);
2890
2891 return netdev_mc_count(netdev);
2892 }
2893
2894 /**
2895 * igb_write_uc_addr_list - write unicast addresses to RAR table
2896 * @netdev: network interface device structure
2897 *
2898 * Writes unicast address list to the RAR table.
2899 * Returns: -ENOMEM on failure/insufficient address space
2900 * 0 on no addresses written
2901 * X on writing X addresses to the RAR table
2902 **/
2903 static int igb_write_uc_addr_list(struct net_device *netdev)
2904 {
2905 struct igb_adapter *adapter = netdev_priv(netdev);
2906 struct e1000_hw *hw = &adapter->hw;
2907 unsigned int vfn = adapter->vfs_allocated_count;
2908 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2909 int count = 0;
2910
2911 /* return -ENOMEM if there is insufficient space for all the addresses */
2912 if (netdev_uc_count(netdev) > rar_entries)
2913 return -ENOMEM;
2914
2915 if (!netdev_uc_empty(netdev) && rar_entries) {
2916 struct netdev_hw_addr *ha;
2917
2918 netdev_for_each_uc_addr(ha, netdev) {
2919 if (!rar_entries)
2920 break;
2921 igb_rar_set_qsel(adapter, ha->addr,
2922 rar_entries--,
2923 vfn);
2924 count++;
2925 }
2926 }
2927 /* clear out the remaining, unused RAR entries */
2928 for (; rar_entries > 0 ; rar_entries--) {
2929 wr32(E1000_RAH(rar_entries), 0);
2930 wr32(E1000_RAL(rar_entries), 0);
2931 }
2932 wrfl();
2933
2934 return count;
2935 }
2936
2937 /**
2938 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2939 * @netdev: network interface device structure
2940 *
2941 * The set_rx_mode entry point is called whenever the unicast or multicast
2942 * address lists or the network interface flags are updated. This routine is
2943 * responsible for configuring the hardware for proper unicast, multicast,
2944 * promiscuous mode, and all-multi behavior.
2945 **/
2946 static void igb_set_rx_mode(struct net_device *netdev)
2947 {
2948 struct igb_adapter *adapter = netdev_priv(netdev);
2949 struct e1000_hw *hw = &adapter->hw;
2950 unsigned int vfn = adapter->vfs_allocated_count;
2951 u32 rctl, vmolr = 0;
2952 int count;
2953
2954 /* Check for Promiscuous and All Multicast modes */
2955 rctl = rd32(E1000_RCTL);
2956
2957 /* clear the affected bits */
2958 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2959
2960 if (netdev->flags & IFF_PROMISC) {
2961 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2962 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2963 } else {
2964 if (netdev->flags & IFF_ALLMULTI) {
2965 rctl |= E1000_RCTL_MPE;
2966 vmolr |= E1000_VMOLR_MPME;
2967 } else {
2968 /*
2969 * Write addresses to the MTA; if the attempt fails
2970 * then we should just turn on promiscuous mode so
2971 * that we can at least receive multicast traffic
2972 */
2973 count = igb_write_mc_addr_list(netdev);
2974 if (count < 0) {
2975 rctl |= E1000_RCTL_MPE;
2976 vmolr |= E1000_VMOLR_MPME;
2977 } else if (count) {
2978 vmolr |= E1000_VMOLR_ROMPE;
2979 }
2980 }
2981 /*
2982 * Write addresses to the available RAR registers; if there is
2983 * not sufficient space to store all the addresses then enable
2984 * unicast promiscuous mode
2985 */
2986 count = igb_write_uc_addr_list(netdev);
2987 if (count < 0) {
2988 rctl |= E1000_RCTL_UPE;
2989 vmolr |= E1000_VMOLR_ROPE;
2990 }
2991 rctl |= E1000_RCTL_VFE;
2992 }
2993 wr32(E1000_RCTL, rctl);
2994
2995 /*
2996 * In order to support SR-IOV and eventually VMDq it is necessary to set
2997 * the VMOLR to enable the appropriate modes. Without this workaround
2998 * VLAN tag stripping would not be done for frames that arrive only
2999 * because we are the default pool.
3000 */
3001 if (hw->mac.type < e1000_82576)
3002 return;
3003
3004 vmolr |= rd32(E1000_VMOLR(vfn)) &
3005 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3006 wr32(E1000_VMOLR(vfn), vmolr);
3007 igb_restore_vf_multicasts(adapter);
3008 }
3009
3010 /* Need to wait a few seconds after link up to get diagnostic information from
3011 * the phy */
3012 static void igb_update_phy_info(unsigned long data)
3013 {
3014 struct igb_adapter *adapter = (struct igb_adapter *) data;
3015 igb_get_phy_info(&adapter->hw);
3016 }
3017
3018 /**
3019 * igb_has_link - check shared code for link and determine up/down
3020 * @adapter: pointer to driver private info
3021 **/
3022 static bool igb_has_link(struct igb_adapter *adapter)
3023 {
3024 struct e1000_hw *hw = &adapter->hw;
3025 bool link_active = false;
3026 s32 ret_val = 0;
3027
3028 /* get_link_status is set on LSC (link status) interrupt or
3029 * rx sequence error interrupt. link_active stays false until
3030 * e1000_check_for_link establishes link, for copper
3031 * adapters ONLY
3032 */
3033 switch (hw->phy.media_type) {
3034 case e1000_media_type_copper:
3035 if (hw->mac.get_link_status) {
3036 ret_val = hw->mac.ops.check_for_link(hw);
3037 link_active = !hw->mac.get_link_status;
3038 } else {
3039 link_active = true;
3040 }
3041 break;
3042 case e1000_media_type_internal_serdes:
3043 ret_val = hw->mac.ops.check_for_link(hw);
3044 link_active = hw->mac.serdes_has_link;
3045 break;
3046 default:
3047 case e1000_media_type_unknown:
3048 break;
3049 }
3050
3051 return link_active;
3052 }
3053
3054 /**
3055 * igb_watchdog - Timer Call-back
3056 * @data: pointer to adapter cast into an unsigned long
3057 **/
3058 static void igb_watchdog(unsigned long data)
3059 {
3060 struct igb_adapter *adapter = (struct igb_adapter *)data;
3061 /* Do the rest outside of interrupt context */
3062 schedule_work(&adapter->watchdog_task);
3063 }
3064
3065 static void igb_watchdog_task(struct work_struct *work)
3066 {
3067 struct igb_adapter *adapter = container_of(work,
3068 struct igb_adapter,
3069 watchdog_task);
3070 struct e1000_hw *hw = &adapter->hw;
3071 struct net_device *netdev = adapter->netdev;
3072 u32 link;
3073 int i;
3074
3075 link = igb_has_link(adapter);
3076 if (link) {
3077 if (!netif_carrier_ok(netdev)) {
3078 u32 ctrl;
3079 hw->mac.ops.get_speed_and_duplex(hw,
3080 &adapter->link_speed,
3081 &adapter->link_duplex);
3082
3083 ctrl = rd32(E1000_CTRL);
3084 /* Link status message must follow this format */
3085 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
3086 "Flow Control: %s\n",
3087 netdev->name,
3088 adapter->link_speed,
3089 adapter->link_duplex == FULL_DUPLEX ?
3090 "Full Duplex" : "Half Duplex",
3091 ((ctrl & E1000_CTRL_TFCE) &&
3092 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3093 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3094 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3095
3096 /* tweak tx_queue_len according to speed/duplex and
3097 * adjust the timeout factor */
3098 netdev->tx_queue_len = adapter->tx_queue_len;
3099 adapter->tx_timeout_factor = 1;
3100 switch (adapter->link_speed) {
3101 case SPEED_10:
3102 netdev->tx_queue_len = 10;
3103 adapter->tx_timeout_factor = 14;
3104 break;
3105 case SPEED_100:
3106 netdev->tx_queue_len = 100;
3107 /* maybe add some timeout factor ? */
3108 break;
3109 }
3110
3111 netif_carrier_on(netdev);
3112
3113 igb_ping_all_vfs(adapter);
3114
3115 /* link state has changed, schedule phy info update */
3116 if (!test_bit(__IGB_DOWN, &adapter->state))
3117 mod_timer(&adapter->phy_info_timer,
3118 round_jiffies(jiffies + 2 * HZ));
3119 }
3120 } else {
3121 if (netif_carrier_ok(netdev)) {
3122 adapter->link_speed = 0;
3123 adapter->link_duplex = 0;
3124 /* Link status message must follow this format */
3125 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3126 netdev->name);
3127 netif_carrier_off(netdev);
3128
3129 igb_ping_all_vfs(adapter);
3130
3131 /* link state has changed, schedule phy info update */
3132 if (!test_bit(__IGB_DOWN, &adapter->state))
3133 mod_timer(&adapter->phy_info_timer,
3134 round_jiffies(jiffies + 2 * HZ));
3135 }
3136 }
3137
3138 igb_update_stats(adapter);
3139 igb_update_adaptive(hw);
3140
3141 for (i = 0; i < adapter->num_tx_queues; i++) {
3142 struct igb_ring *tx_ring = &adapter->tx_ring[i];
3143 if (!netif_carrier_ok(netdev)) {
3144 /* We've lost link, so the controller stops DMA,
3145 * but we've got queued Tx work that's never going
3146 * to get done, so reset controller to flush Tx.
3147 * (Do the reset outside of interrupt context). */
3148 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3149 adapter->tx_timeout_count++;
3150 schedule_work(&adapter->reset_task);
3151 /* return immediately since reset is imminent */
3152 return;
3153 }
3154 }
3155
3156 /* Force detection of hung controller every watchdog period */
3157 tx_ring->detect_tx_hung = true;
3158 }
3159
3160 /* Cause software interrupt to ensure rx ring is cleaned */
3161 if (adapter->msix_entries) {
3162 u32 eics = 0;
3163 for (i = 0; i < adapter->num_q_vectors; i++) {
3164 struct igb_q_vector *q_vector = adapter->q_vector[i];
3165 eics |= q_vector->eims_value;
3166 }
3167 wr32(E1000_EICS, eics);
3168 } else {
3169 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3170 }
3171
3172 /* Reset the timer */
3173 if (!test_bit(__IGB_DOWN, &adapter->state))
3174 mod_timer(&adapter->watchdog_timer,
3175 round_jiffies(jiffies + 2 * HZ));
3176 }
3177
3178 enum latency_range {
3179 lowest_latency = 0,
3180 low_latency = 1,
3181 bulk_latency = 2,
3182 latency_invalid = 255
3183 };
3184
3185 /**
3186 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3187 * @q_vector: pointer to q_vector
3188 *
3189 * Stores a new ITR value based strictly on packet size. This
3190 * algorithm is less sophisticated than that used in igb_update_itr,
3191 * due to the difficulty of synchronizing statistics across multiple
3192 * receive rings. The divisors and thresholds used by this function
3193 * were determined based on theoretical maximum wire speed and testing
3194 * data, in order to minimize response time while increasing bulk
3195 * throughput.
3196 * This functionality is controlled by the InterruptThrottleRate module
3197 * parameter (see igb_param.c)
3198 * NOTE: This function is called only when operating in a multiqueue
3199 * receive environment.
3200 **/
3201 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3202 {
3203 int new_val = q_vector->itr_val;
3204 int avg_wire_size = 0;
3205 struct igb_adapter *adapter = q_vector->adapter;
3206
3207 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3208 * ints/sec - an ITR timer value of 976 ticks.
3209 */
3210 if (adapter->link_speed != SPEED_1000) {
3211 new_val = 976;
3212 goto set_itr_val;
3213 }
3214
3215 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3216 struct igb_ring *ring = q_vector->rx_ring;
3217 avg_wire_size = ring->total_bytes / ring->total_packets;
3218 }
3219
3220 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3221 struct igb_ring *ring = q_vector->tx_ring;
3222 avg_wire_size = max_t(u32, avg_wire_size,
3223 (ring->total_bytes /
3224 ring->total_packets));
3225 }
3226
3227 /* if avg_wire_size isn't set no work was done */
3228 if (!avg_wire_size)
3229 goto clear_counts;
3230
3231 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3232 avg_wire_size += 24;
3233
3234 /* Don't starve jumbo frames */
3235 avg_wire_size = min(avg_wire_size, 3000);
3236
3237 /* Give a little boost to mid-size frames */
3238 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3239 new_val = avg_wire_size / 3;
3240 else
3241 new_val = avg_wire_size / 2;
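/*
 * Worked example (sketch): a 600-byte average frame becomes 624 after
 * the 24-byte overhead; it falls in the 300..1200 mid-size band, so
 * new_val = 624 / 3 = 208. A 64-byte average gives (64 + 24) / 2 = 44,
 * i.e. a much higher interrupt rate for latency-sensitive traffic.
 */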
3242
3243 set_itr_val:
3244 if (new_val != q_vector->itr_val) {
3245 q_vector->itr_val = new_val;
3246 q_vector->set_itr = 1;
3247 }
3248 clear_counts:
3249 if (q_vector->rx_ring) {
3250 q_vector->rx_ring->total_bytes = 0;
3251 q_vector->rx_ring->total_packets = 0;
3252 }
3253 if (q_vector->tx_ring) {
3254 q_vector->tx_ring->total_bytes = 0;
3255 q_vector->tx_ring->total_packets = 0;
3256 }
3257 }
3258
3259 /**
3260 * igb_update_itr - update the dynamic ITR value based on statistics
3261 * @adapter: pointer to adapter
3262 * @itr_setting: current q_vector->itr_val
3263 * @packets: the number of packets during this measurement interval
3264 * @bytes: the number of bytes during this measurement interval
3265 *
3266 * Stores a new ITR value based on packets and byte counts during the
3267 * last interrupt. The advantage of per interrupt computation is faster
3268 * updates and more accurate ITR for the current traffic pattern.
3269 * Constants in this function were computed based on theoretical maximum
3270 * wire speed, and thresholds were set based on testing data as well as
3271 * attempting to minimize response time while increasing bulk throughput.
3272 * This functionality is controlled by the InterruptThrottleRate module
3273 * parameter (see igb_param.c)
3274 * NOTE: These calculations are only valid when operating in a single-
3275 * queue environment.
3276 **/
3277 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3278 int packets, int bytes)
3279 {
3280 unsigned int retval = itr_setting;
3281
3282 if (packets == 0)
3283 goto update_itr_done;
3284
3285 switch (itr_setting) {
3286 case lowest_latency:
3287 /* handle TSO and jumbo frames */
3288 if (bytes/packets > 8000)
3289 retval = bulk_latency;
3290 else if ((packets < 5) && (bytes > 512))
3291 retval = low_latency;
3292 break;
3293 case low_latency: /* 50 usec aka 20000 ints/s */
3294 if (bytes > 10000) {
3295 /* this if handles the TSO accounting */
3296 if (bytes/packets > 8000) {
3297 retval = bulk_latency;
3298 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3299 retval = bulk_latency;
3300 } else if ((packets > 35)) {
3301 retval = lowest_latency;
3302 }
3303 } else if (bytes/packets > 2000) {
3304 retval = bulk_latency;
3305 } else if (packets <= 2 && bytes < 512) {
3306 retval = lowest_latency;
3307 }
3308 break;
3309 case bulk_latency: /* 250 usec aka 4000 ints/s */
3310 if (bytes > 25000) {
3311 if (packets > 35)
3312 retval = low_latency;
3313 } else if (bytes < 1500) {
3314 retval = low_latency;
3315 }
3316 break;
3317 }
3318
3319 update_itr_done:
3320 return retval;
3321 }
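/*
 * Walk-through of igb_update_itr above (sketch): at low_latency with
 * 40 packets / 20000 bytes in the interval, bytes/packets = 500, so
 * none of the TSO/bulk tests fire and packets > 35 steps the setting
 * down to lowest_latency; at lowest_latency, an average of more than
 * 8000 bytes/packet (TSO/jumbo) jumps straight to bulk_latency.
 */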
3322
3323 static void igb_set_itr(struct igb_adapter *adapter)
3324 {
3325 struct igb_q_vector *q_vector = adapter->q_vector[0];
3326 u16 current_itr;
3327 u32 new_itr = q_vector->itr_val;
3328
3329 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3330 if (adapter->link_speed != SPEED_1000) {
3331 current_itr = 0;
3332 new_itr = 4000;
3333 goto set_itr_now;
3334 }
3335
3336 adapter->rx_itr = igb_update_itr(adapter,
3337 adapter->rx_itr,
3338 adapter->rx_ring->total_packets,
3339 adapter->rx_ring->total_bytes);
3340
3341 adapter->tx_itr = igb_update_itr(adapter,
3342 adapter->tx_itr,
3343 adapter->tx_ring->total_packets,
3344 adapter->tx_ring->total_bytes);
3345 current_itr = max(adapter->rx_itr, adapter->tx_itr);
3346
3347 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3348 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
3349 current_itr = low_latency;
3350
3351 switch (current_itr) {
3352 /* counts and packets in update_itr are dependent on these numbers */
3353 case lowest_latency:
3354 new_itr = 56; /* aka 70,000 ints/sec */
3355 break;
3356 case low_latency:
3357 new_itr = 196; /* aka 20,000 ints/sec */
3358 break;
3359 case bulk_latency:
3360 new_itr = 980; /* aka 4,000 ints/sec */
3361 break;
3362 default:
3363 break;
3364 }
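/*
 * Unit sketch (assuming the ~256ns EITR granularity these constants
 * imply): 980 * 256ns ~= 251us ~= 4,000 ints/sec, 196 ~= 50us ~=
 * 20,000 ints/sec, and 56 ~= 14.3us ~= 70,000 ints/sec.
 */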
3365
3366 set_itr_now:
3367 adapter->rx_ring->total_bytes = 0;
3368 adapter->rx_ring->total_packets = 0;
3369 adapter->tx_ring->total_bytes = 0;
3370 adapter->tx_ring->total_packets = 0;
3371
3372 if (new_itr != q_vector->itr_val) {
3373 /* this attempts to bias the interrupt rate towards Bulk
3374 * by adding intermediate steps when interrupt rate is
3375 * increasing */
3376 new_itr = new_itr > q_vector->itr_val ?
3377 max((new_itr * q_vector->itr_val) /
3378 (new_itr + (q_vector->itr_val >> 2)),
3379 new_itr) :
3380 new_itr;
3381 /* Don't write the value here; it resets the adapter's
3382 * internal timer, and causes us to delay far longer than
3383 * we should between interrupts. Instead, we write the ITR
3384 * value at the beginning of the next interrupt so the timing
3385 * ends up being correct.
3386 */
3387 q_vector->itr_val = new_itr;
3388 q_vector->set_itr = 1;
3389 }
3390
3391 return;
3392 }
3393
3394 #define IGB_TX_FLAGS_CSUM 0x00000001
3395 #define IGB_TX_FLAGS_VLAN 0x00000002
3396 #define IGB_TX_FLAGS_TSO 0x00000004
3397 #define IGB_TX_FLAGS_IPV4 0x00000008
3398 #define IGB_TX_FLAGS_TSTAMP 0x00000010
3399 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3400 #define IGB_TX_FLAGS_VLAN_SHIFT 16
3401
3402 static inline int igb_tso_adv(struct igb_ring *tx_ring,
3403 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3404 {
3405 struct e1000_adv_tx_context_desc *context_desc;
3406 unsigned int i;
3407 int err;
3408 struct igb_buffer *buffer_info;
3409 u32 info = 0, tu_cmd = 0;
3410 u32 mss_l4len_idx, l4len;
3411 *hdr_len = 0;
3412
3413 if (skb_header_cloned(skb)) {
3414 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3415 if (err)
3416 return err;
3417 }
3418
3419 l4len = tcp_hdrlen(skb);
3420 *hdr_len += l4len;
3421
3422 if (skb->protocol == htons(ETH_P_IP)) {
3423 struct iphdr *iph = ip_hdr(skb);
3424 iph->tot_len = 0;
3425 iph->check = 0;
3426 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3427 iph->daddr, 0,
3428 IPPROTO_TCP,
3429 0);
3430 } else if (skb_is_gso_v6(skb)) {
3431 ipv6_hdr(skb)->payload_len = 0;
3432 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3433 &ipv6_hdr(skb)->daddr,
3434 0, IPPROTO_TCP, 0);
3435 }
3436
3437 i = tx_ring->next_to_use;
3438
3439 buffer_info = &tx_ring->buffer_info[i];
3440 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3441 /* VLAN MACLEN IPLEN */
3442 if (tx_flags & IGB_TX_FLAGS_VLAN)
3443 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3444 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3445 *hdr_len += skb_network_offset(skb);
3446 info |= skb_network_header_len(skb);
3447 *hdr_len += skb_network_header_len(skb);
3448 context_desc->vlan_macip_lens = cpu_to_le32(info);
3449
3450 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3451 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3452
3453 if (skb->protocol == htons(ETH_P_IP))
3454 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3455 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3456
3457 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3458
3459 /* MSS L4LEN IDX */
3460 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
3461 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3462
3463 /* For 82575, context index must be unique per ring. */
3464 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3465 mss_l4len_idx |= tx_ring->reg_idx << 4;
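/*
 * Packing sketch: for a standard 1500-MTU TCP flow, gso_size is
 * typically 1448 and l4len 20, so the field above packs to
 * (1448 << E1000_ADVTXD_MSS_SHIFT) | (20 << E1000_ADVTXD_L4LEN_SHIFT),
 * optionally OR'd with the ring's context index shifted left by 4.
 */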
3466
3467 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3468 context_desc->seqnum_seed = 0;
3469
3470 buffer_info->time_stamp = jiffies;
3471 buffer_info->next_to_watch = i;
3472 buffer_info->dma = 0;
3473 i++;
3474 if (i == tx_ring->count)
3475 i = 0;
3476
3477 tx_ring->next_to_use = i;
3478
3479 return true;
3480 }
3481
3482 static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3483 struct sk_buff *skb, u32 tx_flags)
3484 {
3485 struct e1000_adv_tx_context_desc *context_desc;
3486 struct pci_dev *pdev = tx_ring->pdev;
3487 struct igb_buffer *buffer_info;
3488 u32 info = 0, tu_cmd = 0;
3489 unsigned int i;
3490
3491 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3492 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3493 i = tx_ring->next_to_use;
3494 buffer_info = &tx_ring->buffer_info[i];
3495 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3496
3497 if (tx_flags & IGB_TX_FLAGS_VLAN)
3498 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3499
3500 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3501 if (skb->ip_summed == CHECKSUM_PARTIAL)
3502 info |= skb_network_header_len(skb);
3503
3504 context_desc->vlan_macip_lens = cpu_to_le32(info);
3505
3506 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3507
3508 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3509 __be16 protocol;
3510
3511 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3512 const struct vlan_ethhdr *vhdr =
3513 (const struct vlan_ethhdr*)skb->data;
3514
3515 protocol = vhdr->h_vlan_encapsulated_proto;
3516 } else {
3517 protocol = skb->protocol;
3518 }
3519
3520 switch (protocol) {
3521 case cpu_to_be16(ETH_P_IP):
3522 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3523 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3524 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3525 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
3526 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3527 break;
3528 case cpu_to_be16(ETH_P_IPV6):
3529 /* XXX what about other V6 headers?? */
3530 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3531 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3532 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
3533 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
3534 break;
3535 default:
3536 if (unlikely(net_ratelimit()))
3537 dev_warn(&pdev->dev,
3538 "partial checksum but proto=%x!\n",
3539 skb->protocol);
3540 break;
3541 }
3542 }
3543
3544 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3545 context_desc->seqnum_seed = 0;
3546 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3547 context_desc->mss_l4len_idx =
3548 cpu_to_le32(tx_ring->reg_idx << 4);
3549
3550 buffer_info->time_stamp = jiffies;
3551 buffer_info->next_to_watch = i;
3552 buffer_info->dma = 0;
3553
3554 i++;
3555 if (i == tx_ring->count)
3556 i = 0;
3557 tx_ring->next_to_use = i;
3558
3559 return true;
3560 }
3561 return false;
3562 }
3563
3564 #define IGB_MAX_TXD_PWR 16
3565 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
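/* i.e. each data descriptor can carry just under 64KB (1 << 16 bytes) */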
3566
3567 static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3568 unsigned int first)
3569 {
3570 struct igb_buffer *buffer_info;
3571 struct pci_dev *pdev = tx_ring->pdev;
3572 unsigned int len = skb_headlen(skb);
3573 unsigned int count = 0, i;
3574 unsigned int f;
3575
3576 i = tx_ring->next_to_use;
3577
3578 buffer_info = &tx_ring->buffer_info[i];
3579 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3580 buffer_info->length = len;
3581 /* set time_stamp *before* dma to help avoid a possible race */
3582 buffer_info->time_stamp = jiffies;
3583 buffer_info->next_to_watch = i;
3584 buffer_info->dma = pci_map_single(pdev, skb->data, len,
3585 PCI_DMA_TODEVICE);
3586 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3587 goto dma_error;
3588
3589 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3590 struct skb_frag_struct *frag;
3591
3592 count++;
3593 i++;
3594 if (i == tx_ring->count)
3595 i = 0;
3596
3597 frag = &skb_shinfo(skb)->frags[f];
3598 len = frag->size;
3599
3600 buffer_info = &tx_ring->buffer_info[i];
3601 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3602 buffer_info->length = len;
3603 buffer_info->time_stamp = jiffies;
3604 buffer_info->next_to_watch = i;
3605 buffer_info->mapped_as_page = true;
3606 buffer_info->dma = pci_map_page(pdev,
3607 frag->page,
3608 frag->page_offset,
3609 len,
3610 PCI_DMA_TODEVICE);
3611 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3612 goto dma_error;
3613
3614 }
3615
3616 tx_ring->buffer_info[i].skb = skb;
3617 tx_ring->buffer_info[first].next_to_watch = i;
3618
3619 return ++count;
3620
3621 dma_error:
3622 dev_err(&pdev->dev, "TX DMA map failed\n");
3623
3624 /* clear timestamp and dma mappings for failed buffer_info mapping */
3625 buffer_info->dma = 0;
3626 buffer_info->time_stamp = 0;
3627 buffer_info->length = 0;
3628 buffer_info->next_to_watch = 0;
3629 buffer_info->mapped_as_page = false;
3630 
3631 /* clear timestamp and dma mappings for remaining portion of packet,
3632 * walking back from the failed descriptor (count and i are unsigned,
3633 * so never test them against negative values) */
3634 while (count--) {
3635 if (i == 0)
3636 i += tx_ring->count;
3637 i--;
3638 buffer_info = &tx_ring->buffer_info[i];
3639 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3640 }
3641
3642 return 0;
3643 }
3644
3645 static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3646 int tx_flags, int count, u32 paylen,
3647 u8 hdr_len)
3648 {
3649 union e1000_adv_tx_desc *tx_desc;
3650 struct igb_buffer *buffer_info;
3651 u32 olinfo_status = 0, cmd_type_len;
3652 unsigned int i = tx_ring->next_to_use;
3653
3654 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3655 E1000_ADVTXD_DCMD_DEXT);
3656
3657 if (tx_flags & IGB_TX_FLAGS_VLAN)
3658 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3659
3660 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3661 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3662
3663 if (tx_flags & IGB_TX_FLAGS_TSO) {
3664 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3665
3666 /* insert tcp checksum */
3667 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3668
3669 /* insert ip checksum */
3670 if (tx_flags & IGB_TX_FLAGS_IPV4)
3671 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3672
3673 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3674 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3675 }
3676
3677 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3678 (tx_flags & (IGB_TX_FLAGS_CSUM |
3679 IGB_TX_FLAGS_TSO |
3680 IGB_TX_FLAGS_VLAN)))
3681 olinfo_status |= tx_ring->reg_idx << 4;
3682
3683 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3684
3685 do {
3686 buffer_info = &tx_ring->buffer_info[i];
3687 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3688 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3689 tx_desc->read.cmd_type_len =
3690 cpu_to_le32(cmd_type_len | buffer_info->length);
3691 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3692 count--;
3693 i++;
3694 if (i == tx_ring->count)
3695 i = 0;
3696 } while (count > 0);
3697
3698 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
3699 /* Force memory writes to complete before letting h/w
3700 * know there are new descriptors to fetch. (Only
3701 * applicable for weak-ordered memory model archs,
3702 * such as IA-64). */
3703 wmb();
3704
3705 tx_ring->next_to_use = i;
3706 writel(i, tx_ring->tail);
3707 /* we need this if more than one processor can write to our tail
3708 * at a time; it synchronizes IO on IA64/Altix systems */
3709 mmiowb();
3710 }
3711
3712 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3713 {
3714 struct net_device *netdev = tx_ring->netdev;
3715
3716 netif_stop_subqueue(netdev, tx_ring->queue_index);
3717
3718 /* Herbert's original patch had:
3719 * smp_mb__after_netif_stop_queue();
3720 * but since that doesn't exist yet, just open code it. */
3721 smp_mb();
3722
3723 /* We need to check again in case another CPU has just
3724 * made room available. */
3725 if (igb_desc_unused(tx_ring) < size)
3726 return -EBUSY;
3727
3728 /* A reprieve! */
3729 netif_wake_subqueue(netdev, tx_ring->queue_index);
3730 tx_ring->tx_stats.restart_queue++;
3731 return 0;
3732 }
3733
3734 static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3735 {
3736 if (igb_desc_unused(tx_ring) >= size)
3737 return 0;
3738 return __igb_maybe_stop_tx(tx_ring, size);
3739 }
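/*
 * Usage sketch (illustrative only, not driver code): a transmit path
 * reserves worst-case ring space up front and bails out with
 * NETDEV_TX_BUSY when igb_maybe_stop_tx() reports the ring is full:
 *
 *	if (igb_maybe_stop_tx(tx_ring, needed_descs))
 *		return NETDEV_TX_BUSY;
 *
 * "needed_descs" is a hypothetical name for the caller's worst-case
 * descriptor count; igb_xmit_frame_ring_adv() below does exactly this
 * with skb_shinfo(skb)->nr_frags + 4.
 */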
3740
3741 netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3742 struct igb_ring *tx_ring)
3743 {
3744 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3745 unsigned int first;
3746 unsigned int tx_flags = 0;
3747 u8 hdr_len = 0;
3748 int tso = 0, count;
3749 union skb_shared_tx *shtx = skb_tx(skb);
3750
3751 /* need: 1 descriptor per page,
3752 * + 2 desc gap to keep tail from touching head,
3753 * + 1 desc for skb->data,
3754 * + 1 desc for context descriptor,
3755 * otherwise try next time */
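/* e.g. an skb with three page fragments must see at least
 * 3 + 1 + 1 + 2 = 7 unused descriptors before we proceed */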
3756 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3757 /* this is a hard error */
3758 return NETDEV_TX_BUSY;
3759 }
3760
3761 if (unlikely(shtx->hardware)) {
3762 shtx->in_progress = 1;
3763 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3764 }
3765
3766 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
3767 tx_flags |= IGB_TX_FLAGS_VLAN;
3768 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3769 }
3770
3771 if (skb->protocol == htons(ETH_P_IP))
3772 tx_flags |= IGB_TX_FLAGS_IPV4;
3773
3774 first = tx_ring->next_to_use;
3775 if (skb_is_gso(skb)) {
3776 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3777
3778 if (tso < 0) {
3779 dev_kfree_skb_any(skb);
3780 return NETDEV_TX_OK;
3781 }
3782 }
3783
3784 if (tso)
3785 tx_flags |= IGB_TX_FLAGS_TSO;
3786 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
3787 (skb->ip_summed == CHECKSUM_PARTIAL))
3788 tx_flags |= IGB_TX_FLAGS_CSUM;
3789
3790 /*
3791 * count reflects descriptors mapped; if it is 0 then a mapping error
3792 * has occurred and we need to rewind the descriptor queue
3793 */
3794 count = igb_tx_map_adv(tx_ring, skb, first);
3795 if (!count) {
3796 dev_kfree_skb_any(skb);
3797 tx_ring->buffer_info[first].time_stamp = 0;
3798 tx_ring->next_to_use = first;
3799 return NETDEV_TX_OK;
3800 }
3801
3802 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3803
3804 /* Make sure there is space in the ring for the next send. */
3805 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
3806
3807 return NETDEV_TX_OK;
3808 }
3809
3810 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3811 struct net_device *netdev)
3812 {
3813 struct igb_adapter *adapter = netdev_priv(netdev);
3814 struct igb_ring *tx_ring;
3815 int r_idx = 0;
3816
3817 if (test_bit(__IGB_DOWN, &adapter->state)) {
3818 dev_kfree_skb_any(skb);
3819 return NETDEV_TX_OK;
3820 }
3821
3822 if (skb->len == 0) {
3823 dev_kfree_skb_any(skb);
3824 return NETDEV_TX_OK;
3825 }
3826
3827 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3828 tx_ring = adapter->multi_tx_table[r_idx];
3829
3830 /* This goes back to the question of how to logically map a tx queue
3831 * to a flow. Right now, performance is impacted slightly negatively
3832 * if using multiple tx queues. If the stack breaks away from a
3833 * single qdisc implementation, we can look at this again. */
3834 return igb_xmit_frame_ring_adv(skb, tx_ring);
3835 }
3836
3837 /**
3838 * igb_tx_timeout - Respond to a Tx Hang
3839 * @netdev: network interface device structure
3840 **/
3841 static void igb_tx_timeout(struct net_device *netdev)
3842 {
3843 struct igb_adapter *adapter = netdev_priv(netdev);
3844 struct e1000_hw *hw = &adapter->hw;
3845
3846 /* Do the reset outside of interrupt context */
3847 adapter->tx_timeout_count++;
3848
3849 if (hw->mac.type == e1000_82580)
3850 hw->dev_spec._82575.global_device_reset = true;
3851
3852 schedule_work(&adapter->reset_task);
3853 wr32(E1000_EICS,
3854 (adapter->eims_enable_mask & ~adapter->eims_other));
3855 }
3856
3857 static void igb_reset_task(struct work_struct *work)
3858 {
3859 struct igb_adapter *adapter;
3860 adapter = container_of(work, struct igb_adapter, reset_task);
3861
3862 igb_reinit_locked(adapter);
3863 }
3864
3865 /**
3866 * igb_get_stats - Get System Network Statistics
3867 * @netdev: network interface device structure
3868 *
3869 * Returns the address of the device statistics structure.
3870 * The statistics are actually updated from the timer callback.
3871 **/
3872 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3873 {
3874 /* only return the current stats */
3875 return &netdev->stats;
3876 }
3877
3878 /**
3879 * igb_change_mtu - Change the Maximum Transfer Unit
3880 * @netdev: network interface device structure
3881 * @new_mtu: new value for maximum frame size
3882 *
3883 * Returns 0 on success, negative on failure
3884 **/
3885 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3886 {
3887 struct igb_adapter *adapter = netdev_priv(netdev);
3888 struct pci_dev *pdev = adapter->pdev;
3889 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3890 u32 rx_buffer_len, i;
3891
3892 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3893 dev_err(&pdev->dev, "Invalid MTU setting\n");
3894 return -EINVAL;
3895 }
3896
3897 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3898 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
3899 return -EINVAL;
3900 }
3901
3902 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3903 msleep(1);
3904
3905 /* igb_down has a dependency on max_frame_size */
3906 adapter->max_frame_size = max_frame;
3907
3908 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3909 * means we reserve 2 more; this pushes us to allocate from the next
3910 * larger slab size.
3911 * i.e. RXBUFFER_2048 --> size-4096 slab
3912 */
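/* e.g. with NET_IP_ALIGN == 2 a 2048-byte buffer request grows to
 * roughly 2048 + 16 + 2 = 2066 bytes and spills into the size-4096
 * slab, which is why the smaller 1024-byte buffer is preferred below
 * whenever the frame fits */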
3913
3914 if (max_frame <= IGB_RXBUFFER_1024)
3915 rx_buffer_len = IGB_RXBUFFER_1024;
3916 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3917 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3918 else
3919 rx_buffer_len = IGB_RXBUFFER_128;
3920
3921 if (netif_running(netdev))
3922 igb_down(adapter);
3923
3924 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
3925 netdev->mtu, new_mtu);
3926 netdev->mtu = new_mtu;
3927
3928 for (i = 0; i < adapter->num_rx_queues; i++)
3929 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3930
3931 if (netif_running(netdev))
3932 igb_up(adapter);
3933 else
3934 igb_reset(adapter);
3935
3936 clear_bit(__IGB_RESETTING, &adapter->state);
3937
3938 return 0;
3939 }
3940
3941 /**
3942 * igb_update_stats - Update the board statistics counters
3943 * @adapter: board private structure
3944 **/
3945
3946 void igb_update_stats(struct igb_adapter *adapter)
3947 {
3948 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3949 struct e1000_hw *hw = &adapter->hw;
3950 struct pci_dev *pdev = adapter->pdev;
3951 u32 rnbc;
3952 u16 phy_tmp;
3953 int i;
3954 u64 bytes, packets;
3955
3956 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3957
3958 /*
3959 * Prevent stats update while adapter is being reset, or if the pci
3960 * connection is down.
3961 */
3962 if (adapter->link_speed == 0)
3963 return;
3964 if (pci_channel_offline(pdev))
3965 return;
3966
3967 bytes = 0;
3968 packets = 0;
3969 for (i = 0; i < adapter->num_rx_queues; i++) {
3970 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3971 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3972 net_stats->rx_fifo_errors += rqdpc_tmp;
3973 bytes += adapter->rx_ring[i].rx_stats.bytes;
3974 packets += adapter->rx_ring[i].rx_stats.packets;
3975 }
3976
3977 net_stats->rx_bytes = bytes;
3978 net_stats->rx_packets = packets;
3979
3980 bytes = 0;
3981 packets = 0;
3982 for (i = 0; i < adapter->num_tx_queues; i++) {
3983 bytes += adapter->tx_ring[i].tx_stats.bytes;
3984 packets += adapter->tx_ring[i].tx_stats.packets;
3985 }
3986 net_stats->tx_bytes = bytes;
3987 net_stats->tx_packets = packets;
3988
3989 /* read stats registers */
3990 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3991 adapter->stats.gprc += rd32(E1000_GPRC);
3992 adapter->stats.gorc += rd32(E1000_GORCL);
3993 rd32(E1000_GORCH); /* clear GORCL */
3994 adapter->stats.bprc += rd32(E1000_BPRC);
3995 adapter->stats.mprc += rd32(E1000_MPRC);
3996 adapter->stats.roc += rd32(E1000_ROC);
3997
3998 adapter->stats.prc64 += rd32(E1000_PRC64);
3999 adapter->stats.prc127 += rd32(E1000_PRC127);
4000 adapter->stats.prc255 += rd32(E1000_PRC255);
4001 adapter->stats.prc511 += rd32(E1000_PRC511);
4002 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4003 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4004 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4005 adapter->stats.sec += rd32(E1000_SEC);
4006
4007 adapter->stats.mpc += rd32(E1000_MPC);
4008 adapter->stats.scc += rd32(E1000_SCC);
4009 adapter->stats.ecol += rd32(E1000_ECOL);
4010 adapter->stats.mcc += rd32(E1000_MCC);
4011 adapter->stats.latecol += rd32(E1000_LATECOL);
4012 adapter->stats.dc += rd32(E1000_DC);
4013 adapter->stats.rlec += rd32(E1000_RLEC);
4014 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4015 adapter->stats.xontxc += rd32(E1000_XONTXC);
4016 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4017 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4018 adapter->stats.fcruc += rd32(E1000_FCRUC);
4019 adapter->stats.gptc += rd32(E1000_GPTC);
4020 adapter->stats.gotc += rd32(E1000_GOTCL);
4021 rd32(E1000_GOTCH); /* clear GOTCL */
4022 rnbc = rd32(E1000_RNBC);
4023 adapter->stats.rnbc += rnbc;
4024 net_stats->rx_fifo_errors += rnbc;
4025 adapter->stats.ruc += rd32(E1000_RUC);
4026 adapter->stats.rfc += rd32(E1000_RFC);
4027 adapter->stats.rjc += rd32(E1000_RJC);
4028 adapter->stats.tor += rd32(E1000_TORH);
4029 adapter->stats.tot += rd32(E1000_TOTH);
4030 adapter->stats.tpr += rd32(E1000_TPR);
4031
4032 adapter->stats.ptc64 += rd32(E1000_PTC64);
4033 adapter->stats.ptc127 += rd32(E1000_PTC127);
4034 adapter->stats.ptc255 += rd32(E1000_PTC255);
4035 adapter->stats.ptc511 += rd32(E1000_PTC511);
4036 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4037 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4038
4039 adapter->stats.mptc += rd32(E1000_MPTC);
4040 adapter->stats.bptc += rd32(E1000_BPTC);
4041
4042 /* used for adaptive IFS */
4043 hw->mac.tx_packet_delta = rd32(E1000_TPT);
4044 adapter->stats.tpt += hw->mac.tx_packet_delta;
4045 hw->mac.collision_delta = rd32(E1000_COLC);
4046 adapter->stats.colc += hw->mac.collision_delta;
4047
4048 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
4049 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4050 adapter->stats.tncrs += rd32(E1000_TNCRS);
4051 adapter->stats.tsctc += rd32(E1000_TSCTC);
4052 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4053
4054 adapter->stats.iac += rd32(E1000_IAC);
4055 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4056 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4057 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4058 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4059 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4060 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4061 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4062 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4063
4064 /* Fill out the OS statistics structure */
4065 net_stats->multicast = adapter->stats.mprc;
4066 net_stats->collisions = adapter->stats.colc;
4067
4068 /* Rx Errors */
4069
4070 /* RLEC on some newer hardware can be incorrect so build
4071 * our own version based on RUC and ROC */
4072 net_stats->rx_errors = adapter->stats.rxerrc +
4073 adapter->stats.crcerrs + adapter->stats.algnerrc +
4074 adapter->stats.ruc + adapter->stats.roc +
4075 adapter->stats.cexterr;
4076 net_stats->rx_length_errors = adapter->stats.ruc +
4077 adapter->stats.roc;
4078 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4079 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4080 net_stats->rx_missed_errors = adapter->stats.mpc;
4081
4082 /* Tx Errors */
4083 net_stats->tx_errors = adapter->stats.ecol +
4084 adapter->stats.latecol;
4085 net_stats->tx_aborted_errors = adapter->stats.ecol;
4086 net_stats->tx_window_errors = adapter->stats.latecol;
4087 net_stats->tx_carrier_errors = adapter->stats.tncrs;
4088
4089 /* Tx Dropped needs to be maintained elsewhere */
4090
4091 /* Phy Stats */
4092 if (hw->phy.media_type == e1000_media_type_copper) {
4093 if ((adapter->link_speed == SPEED_1000) &&
4094 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
4095 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4096 adapter->phy_stats.idle_errors += phy_tmp;
4097 }
4098 }
4099
4100 /* Management Stats */
4101 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4102 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4103 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
4104 }
4105
4106 static irqreturn_t igb_msix_other(int irq, void *data)
4107 {
4108 struct igb_adapter *adapter = data;
4109 struct e1000_hw *hw = &adapter->hw;
4110 u32 icr = rd32(E1000_ICR);
4111 /* reading ICR causes bit 31 of EICR to be cleared */
4112
4113 if (icr & E1000_ICR_DRSTA)
4114 schedule_work(&adapter->reset_task);
4115
4116 if (icr & E1000_ICR_DOUTSYNC) {
4117 /* HW is reporting DMA is out of sync */
4118 adapter->stats.doosync++;
4119 }
4120
4121 /* Check for a mailbox event */
4122 if (icr & E1000_ICR_VMMB)
4123 igb_msg_task(adapter);
4124
4125 if (icr & E1000_ICR_LSC) {
4126 hw->mac.get_link_status = 1;
4127 /* guard against interrupt when we're going down */
4128 if (!test_bit(__IGB_DOWN, &adapter->state))
4129 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4130 }
4131
4132 if (adapter->vfs_allocated_count)
4133 wr32(E1000_IMS, E1000_IMS_LSC |
4134 E1000_IMS_VMMB |
4135 E1000_IMS_DOUTSYNC);
4136 else
4137 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
4138 wr32(E1000_EIMS, adapter->eims_other);
4139
4140 return IRQ_HANDLED;
4141 }
4142
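/* igb_write_itr() below masks itr_val with 0x7FFC, dropping the two
 * low bits before the value reaches the hardware: for example
 * 0x3D07 & 0x7FFC = 0x3D04. How the surviving bits are then placed
 * (duplicated via itr_shift or tagged with 0x8000000) depends on the
 * MAC type; the arithmetic here is illustrative only. */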
4143 static void igb_write_itr(struct igb_q_vector *q_vector)
4144 {
4145 u32 itr_val = q_vector->itr_val & 0x7FFC;
4146
4147 if (!q_vector->set_itr)
4148 return;
4149
4150 if (!itr_val)
4151 itr_val = 0x4;
4152
4153 if (q_vector->itr_shift)
4154 itr_val |= itr_val << q_vector->itr_shift;
4155 else
4156 itr_val |= 0x8000000;
4157
4158 writel(itr_val, q_vector->itr_register);
4159 q_vector->set_itr = 0;
4160 }
4161
4162 static irqreturn_t igb_msix_ring(int irq, void *data)
4163 {
4164 struct igb_q_vector *q_vector = data;
4165
4166 /* Write the ITR value calculated from the previous interrupt. */
4167 igb_write_itr(q_vector);
4168
4169 napi_schedule(&q_vector->napi);
4170
4171 return IRQ_HANDLED;
4172 }
4173
4174 #ifdef CONFIG_IGB_DCA
4175 static void igb_update_dca(struct igb_q_vector *q_vector)
4176 {
4177 struct igb_adapter *adapter = q_vector->adapter;
4178 struct e1000_hw *hw = &adapter->hw;
4179 int cpu = get_cpu();
4180
4181 if (q_vector->cpu == cpu)
4182 goto out_no_update;
4183
4184 if (q_vector->tx_ring) {
4185 int q = q_vector->tx_ring->reg_idx;
4186 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4187 if (hw->mac.type == e1000_82575) {
4188 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4189 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4190 } else {
4191 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4192 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4193 E1000_DCA_TXCTRL_CPUID_SHIFT;
4194 }
4195 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4196 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4197 }
4198 if (q_vector->rx_ring) {
4199 int q = q_vector->rx_ring->reg_idx;
4200 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4201 if (hw->mac.type == e1000_82575) {
4202 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4203 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4204 } else {
4205 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4206 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4207 E1000_DCA_RXCTRL_CPUID_SHIFT;
4208 }
4209 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4210 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4211 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4212 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
4213 }
4214 q_vector->cpu = cpu;
4215 out_no_update:
4216 put_cpu();
4217 }
4218
4219 static void igb_setup_dca(struct igb_adapter *adapter)
4220 {
4221 struct e1000_hw *hw = &adapter->hw;
4222 int i;
4223
4224 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
4225 return;
4226
4227 /* Always use CB2 mode, difference is masked in the CB driver. */
4228 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4229
4230 for (i = 0; i < adapter->num_q_vectors; i++) {
4231 struct igb_q_vector *q_vector = adapter->q_vector[i];
4232 q_vector->cpu = -1;
4233 igb_update_dca(q_vector);
4234 }
4235 }
4236
4237 static int __igb_notify_dca(struct device *dev, void *data)
4238 {
4239 struct net_device *netdev = dev_get_drvdata(dev);
4240 struct igb_adapter *adapter = netdev_priv(netdev);
4241 struct pci_dev *pdev = adapter->pdev;
4242 struct e1000_hw *hw = &adapter->hw;
4243 unsigned long event = *(unsigned long *)data;
4244
4245 switch (event) {
4246 case DCA_PROVIDER_ADD:
4247 /* if already enabled, don't do it again */
4248 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
4249 break;
4250 if (dca_add_requester(dev) == 0) {
4251 adapter->flags |= IGB_FLAG_DCA_ENABLED;
4252 dev_info(&pdev->dev, "DCA enabled\n");
4253 igb_setup_dca(adapter);
4254 break;
4255 }
4256 /* Fall Through since DCA is disabled. */
4257 case DCA_PROVIDER_REMOVE:
4258 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
4259 /* without this a class_device is left
4260 * hanging around in the sysfs model */
4261 dca_remove_requester(dev);
4262 dev_info(&pdev->dev, "DCA disabled\n");
4263 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
4264 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
4265 }
4266 break;
4267 }
4268
4269 return 0;
4270 }
4271
4272 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4273 void *p)
4274 {
4275 int ret_val;
4276
4277 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4278 __igb_notify_dca);
4279
4280 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4281 }
4282 #endif /* CONFIG_IGB_DCA */
4283
4284 static void igb_ping_all_vfs(struct igb_adapter *adapter)
4285 {
4286 struct e1000_hw *hw = &adapter->hw;
4287 u32 ping;
4288 int i;
4289
4290 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4291 ping = E1000_PF_CONTROL_MSG;
4292 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4293 ping |= E1000_VT_MSGTYPE_CTS;
4294 igb_write_mbx(hw, &ping, 1, i);
4295 }
4296 }
4297
4298 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4299 {
4300 struct e1000_hw *hw = &adapter->hw;
4301 u32 vmolr = rd32(E1000_VMOLR(vf));
4302 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4303
4304 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
4305 IGB_VF_FLAG_MULTI_PROMISC);
4306 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4307
4308 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4309 vmolr |= E1000_VMOLR_MPME;
4310 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4311 } else {
4312 /*
4313 * if we have hashes and we are clearing a multicast promisc
4314 * flag we need to write the hashes to the MTA as this step
4315 * was previously skipped
4316 */
4317 if (vf_data->num_vf_mc_hashes > 30) {
4318 vmolr |= E1000_VMOLR_MPME;
4319 } else if (vf_data->num_vf_mc_hashes) {
4320 int j;
4321 vmolr |= E1000_VMOLR_ROMPE;
4322 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4323 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4324 }
4325 }
4326
4327 wr32(E1000_VMOLR(vf), vmolr);
4328
4329 /* there are flags left unprocessed, likely not supported */
4330 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4331 return -EINVAL;
4332
4333 return 0;
4334
4335 }
4336
4337 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4338 u32 *msgbuf, u32 vf)
4339 {
4340 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4341 u16 *hash_list = (u16 *)&msgbuf[1];
4342 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4343 int i;
4344
4345 /* salt away the number of multicast addresses assigned
4346 * to this VF for later use to restore when the PF multicast
4347 * list changes
4348 */
4349 vf_data->num_vf_mc_hashes = n;
4350
4351 /* only up to 30 hash values supported */
4352 if (n > 30)
4353 n = 30;
4354
4355 /* store the hashes for later use */
4356 for (i = 0; i < n; i++)
4357 vf_data->vf_mc_hashes[i] = hash_list[i];
4358
4359 /* Flush and reset the mta with the new values */
4360 igb_set_rx_mode(adapter->netdev);
4361
4362 return 0;
4363 }
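/*
 * Mailbox layout sketch (inferred from the decode above; the VF side
 * lives in igbvf and may differ in detail): the VF encodes the hash
 * count in the MSGINFO field of word 0 and packs the 16-bit hash
 * values into the following words, e.g.
 *
 *	msgbuf[0] = E1000_VF_SET_MULTICAST | (n << E1000_VT_MSGINFO_SHIFT);
 *	memcpy(&msgbuf[1], hash_list, n * sizeof(u16));
 */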
4364
4365 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
4366 {
4367 struct e1000_hw *hw = &adapter->hw;
4368 struct vf_data_storage *vf_data;
4369 int i, j;
4370
4371 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4372 u32 vmolr = rd32(E1000_VMOLR(i));
4373 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4374
4375 vf_data = &adapter->vf_data[i];
4376
4377 if ((vf_data->num_vf_mc_hashes > 30) ||
4378 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4379 vmolr |= E1000_VMOLR_MPME;
4380 } else if (vf_data->num_vf_mc_hashes) {
4381 vmolr |= E1000_VMOLR_ROMPE;
4382 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4383 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4384 }
4385 wr32(E1000_VMOLR(i), vmolr);
4386 }
4387 }
4388
4389 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
4390 {
4391 struct e1000_hw *hw = &adapter->hw;
4392 u32 pool_mask, reg, vid;
4393 int i;
4394
4395 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4396
4397 /* Find the vlan filter for this id */
4398 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4399 reg = rd32(E1000_VLVF(i));
4400
4401 /* remove the vf from the pool */
4402 reg &= ~pool_mask;
4403
4404 /* if pool is empty then remove entry from vfta */
4405 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
4406 (reg & E1000_VLVF_VLANID_ENABLE)) {
4407 vid = reg & E1000_VLVF_VLANID_MASK;
4408 igb_vfta_set(hw, vid, false);
4409 reg = 0;
4410 }
4411
4412 wr32(E1000_VLVF(i), reg);
4413 }
4414
4415 adapter->vf_data[vf].vlans_enabled = 0;
4416 }
4417
4418 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4419 {
4420 struct e1000_hw *hw = &adapter->hw;
4421 u32 reg, i;
4422
4423 /* The vlvf table only exists on 82576 hardware and newer */
4424 if (hw->mac.type < e1000_82576)
4425 return -1;
4426
4427 /* we only need to do this if VMDq is enabled */
4428 if (!adapter->vfs_allocated_count)
4429 return -1;
4430
4431 /* Find the vlan filter for this id */
4432 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4433 reg = rd32(E1000_VLVF(i));
4434 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
4435 vid == (reg & E1000_VLVF_VLANID_MASK))
4436 break;
4437 }
4438
4439 if (add) {
4440 if (i == E1000_VLVF_ARRAY_SIZE) {
4441 /* Did not find a matching VLAN ID entry that was
4442 * enabled. Search for a free filter entry, i.e.
4443 * one without the enable bit set
4444 */
4445 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
4446 reg = rd32(E1000_VLVF(i));
4447 if (!(reg & E1000_VLVF_VLANID_ENABLE))
4448 break;
4449 }
4450 }
4451 if (i < E1000_VLVF_ARRAY_SIZE) {
4452 /* Found an enabled/available entry */
4453 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
4454
4455 /* if !enabled we need to set this up in vfta */
4456 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4457 /* add VID to filter table */
4458 igb_vfta_set(hw, vid, true);
4459 reg |= E1000_VLVF_VLANID_ENABLE;
4460 }
4461 reg &= ~E1000_VLVF_VLANID_MASK;
4462 reg |= vid;
4463 wr32(E1000_VLVF(i), reg);
4464
4465 /* do not modify RLPML for PF devices */
4466 if (vf >= adapter->vfs_allocated_count)
4467 return 0;
4468
4469 if (!adapter->vf_data[vf].vlans_enabled) {
4470 u32 size;
4471 reg = rd32(E1000_VMOLR(vf));
4472 size = reg & E1000_VMOLR_RLPML_MASK;
4473 size += 4;
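/* allow for the 4-byte VLAN tag now that tagged frames may arrive */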
4474 reg &= ~E1000_VMOLR_RLPML_MASK;
4475 reg |= size;
4476 wr32(E1000_VMOLR(vf), reg);
4477 }
4478
4479 adapter->vf_data[vf].vlans_enabled++;
4480 return 0;
4481 }
4482 } else {
4483 if (i < E1000_VLVF_ARRAY_SIZE) {
4484 /* remove vf from the pool */
4485 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
4486 /* if pool is empty then remove entry from vfta */
4487 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
4488 reg = 0;
4489 igb_vfta_set(hw, vid, false);
4490 }
4491 wr32(E1000_VLVF(i), reg);
4492
4493 /* do not modify RLPML for PF devices */
4494 if (vf >= adapter->vfs_allocated_count)
4495 return 0;
4496
4497 adapter->vf_data[vf].vlans_enabled--;
4498 if (!adapter->vf_data[vf].vlans_enabled) {
4499 u32 size;
4500 reg = rd32(E1000_VMOLR(vf));
4501 size = reg & E1000_VMOLR_RLPML_MASK;
4502 size -= 4;
4503 reg &= ~E1000_VMOLR_RLPML_MASK;
4504 reg |= size;
4505 wr32(E1000_VMOLR(vf), reg);
4506 }
4507 }
4508 }
4509 return 0;
4510 }
4511
4512 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
4513 {
4514 struct e1000_hw *hw = &adapter->hw;
4515
4516 if (vid)
4517 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
4518 else
4519 wr32(E1000_VMVIR(vf), 0);
4520 }
4521
4522 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
4523 int vf, u16 vlan, u8 qos)
4524 {
4525 int err = 0;
4526 struct igb_adapter *adapter = netdev_priv(netdev);
4527
4528 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
4529 return -EINVAL;
4530 if (vlan || qos) {
4531 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
4532 if (err)
4533 goto out;
4534 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
4535 igb_set_vmolr(adapter, vf, !vlan);
4536 adapter->vf_data[vf].pf_vlan = vlan;
4537 adapter->vf_data[vf].pf_qos = qos;
4538 dev_info(&adapter->pdev->dev,
4539 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
4540 if (test_bit(__IGB_DOWN, &adapter->state)) {
4541 dev_warn(&adapter->pdev->dev,
4542 "The VF VLAN has been set,"
4543 " but the PF device is not up.\n");
4544 dev_warn(&adapter->pdev->dev,
4545 "Bring the PF device up before"
4546 " attempting to use the VF device.\n");
4547 }
4548 } else {
4549 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
4550 false, vf);
4551 igb_set_vmvir(adapter, vlan, vf);
4552 igb_set_vmolr(adapter, vf, true);
4553 adapter->vf_data[vf].pf_vlan = 0;
4554 adapter->vf_data[vf].pf_qos = 0;
4555 }
4556 out:
4557 return err;
4558 }
4559
4560 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4561 {
4562 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4563 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
4564
4565 return igb_vlvf_set(adapter, vid, add, vf);
4566 }
4567
4568 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4569 {
4570 /* clear flags */
4571 adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
4572 adapter->vf_data[vf].last_nack = jiffies;
4573
4574 /* reset offloads to defaults */
4575 igb_set_vmolr(adapter, vf, true);
4576
4577 /* reset vlans for device */
4578 igb_clear_vf_vfta(adapter, vf);
4579 if (adapter->vf_data[vf].pf_vlan)
4580 igb_ndo_set_vf_vlan(adapter->netdev, vf,
4581 adapter->vf_data[vf].pf_vlan,
4582 adapter->vf_data[vf].pf_qos);
4583 else
4584 igb_clear_vf_vfta(adapter, vf);
4585
4586 /* reset multicast table array for vf */
4587 adapter->vf_data[vf].num_vf_mc_hashes = 0;
4588
4589 /* Flush and reset the mta with the new values */
4590 igb_set_rx_mode(adapter->netdev);
4591 }
4592
4593 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4594 {
4595 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4596
4597 /* generate a new mac address as we were hotplug removed/added */
4598 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
4599 random_ether_addr(vf_mac);
4600
4601 /* process remaining reset events */
4602 igb_vf_reset(adapter, vf);
4603 }
4604
4605 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4606 {
4607 struct e1000_hw *hw = &adapter->hw;
4608 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4609 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4610 u32 reg, msgbuf[3];
4611 u8 *addr = (u8 *)(&msgbuf[1]);
4612
4613 /* process all the same items cleared in a function level reset */
4614 igb_vf_reset(adapter, vf);
4615
4616 /* set vf mac address */
4617 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4618
4619 /* enable transmit and receive for vf */
4620 reg = rd32(E1000_VFTE);
4621 wr32(E1000_VFTE, reg | (1 << vf));
4622 reg = rd32(E1000_VFRE);
4623 wr32(E1000_VFRE, reg | (1 << vf));
4624
4625 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
4626
4627 /* reply to reset with ack and vf mac address */
4628 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
4629 memcpy(addr, vf_mac, 6);
4630 igb_write_mbx(hw, msgbuf, 3, vf);
4631 }
4632
4633 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4634 {
4635 unsigned char *addr = (unsigned char *)&msg[1];
4636 int err = -1;
4637
4638 if (is_valid_ether_addr(addr))
4639 err = igb_set_vf_mac(adapter, vf, addr);
4640
4641 return err;
4642 }
4643
4644 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4645 {
4646 struct e1000_hw *hw = &adapter->hw;
4647 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4648 u32 msg = E1000_VT_MSGTYPE_NACK;
4649
4650 /* if device isn't clear to send it shouldn't be reading either */
4651 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4652 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4653 igb_write_mbx(hw, &msg, 1, vf);
4654 vf_data->last_nack = jiffies;
4655 }
4656 }
4657
4658 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4659 {
4660 struct pci_dev *pdev = adapter->pdev;
4661 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4662 struct e1000_hw *hw = &adapter->hw;
4663 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4664 s32 retval;
4665
4666 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4667
4668 if (retval) {
4669 /* if receive failed revoke VF CTS stats and restart init */
4670 dev_err(&pdev->dev, "Error receiving message from VF\n");
4671 vf_data->flags &= ~IGB_VF_FLAG_CTS;
4672 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4673 return;
4674 goto out;
4675 }
4676
4677 /* this is a message we already processed, do nothing */
4678 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4679 return;
4680
4681 /*
4682 * until the vf completes a reset it should not be
4683 * allowed to start any configuration.
4684 */
4685
4686 if (msgbuf[0] == E1000_VF_RESET) {
4687 igb_vf_reset_msg(adapter, vf);
4688 return;
4689 }
4690
4691 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
4692 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
4693 return;
4694 retval = -1;
4695 goto out;
4696 }
4697
4698 switch ((msgbuf[0] & 0xFFFF)) {
4699 case E1000_VF_SET_MAC_ADDR:
4700 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4701 break;
4702 case E1000_VF_SET_PROMISC:
4703 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4704 break;
4705 case E1000_VF_SET_MULTICAST:
4706 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4707 break;
4708 case E1000_VF_SET_LPE:
4709 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4710 break;
4711 case E1000_VF_SET_VLAN:
4712 if (adapter->vf_data[vf].pf_vlan)
4713 retval = -1;
4714 else
4715 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4716 break;
4717 default:
4718 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4719 retval = -1;
4720 break;
4721 }
4722
4723 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4724 out:
4725 /* notify the VF of the results of what it sent us */
4726 if (retval)
4727 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4728 else
4729 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4730
4731 igb_write_mbx(hw, msgbuf, 1, vf);
4732 }
4733
4734 static void igb_msg_task(struct igb_adapter *adapter)
4735 {
4736 struct e1000_hw *hw = &adapter->hw;
4737 u32 vf;
4738
4739 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4740 /* process any reset requests */
4741 if (!igb_check_for_rst(hw, vf))
4742 igb_vf_reset_event(adapter, vf);
4743
4744 /* process any messages pending */
4745 if (!igb_check_for_msg(hw, vf))
4746 igb_rcv_msg_from_vf(adapter, vf);
4747
4748 /* process any acks */
4749 if (!igb_check_for_ack(hw, vf))
4750 igb_rcv_ack_from_vf(adapter, vf);
4751 }
4752 }
4753
4754 /**
4755 * igb_set_uta - Set unicast filter table address
4756 * @adapter: board private structure
4757 *
4758 * The unicast table address is a register array of 32-bit registers.
4759 * The table is meant to be used in a way similar to how the MTA is used
4760 * however due to certain limitations in the hardware it is necessary to
4761 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
4762 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
4763 **/
4764 static void igb_set_uta(struct igb_adapter *adapter)
4765 {
4766 struct e1000_hw *hw = &adapter->hw;
4767 int i;
4768
4769 /* The UTA table only exists on 82576 hardware and newer */
4770 if (hw->mac.type < e1000_82576)
4771 return;
4772
4773 /* we only need to do this if VMDq is enabled */
4774 if (!adapter->vfs_allocated_count)
4775 return;
4776
4777 for (i = 0; i < hw->mac.uta_reg_count; i++)
4778 array_wr32(E1000_UTA, i, ~0);
4779 }
4780
4781 /**
4782 * igb_intr_msi - Interrupt Handler
4783 * @irq: interrupt number
4784 * @data: pointer to a network interface device structure
4785 **/
4786 static irqreturn_t igb_intr_msi(int irq, void *data)
4787 {
4788 struct igb_adapter *adapter = data;
4789 struct igb_q_vector *q_vector = adapter->q_vector[0];
4790 struct e1000_hw *hw = &adapter->hw;
4791 /* read ICR disables interrupts using IAM */
4792 u32 icr = rd32(E1000_ICR);
4793
4794 igb_write_itr(q_vector);
4795
4796 if (icr & E1000_ICR_DRSTA)
4797 schedule_work(&adapter->reset_task);
4798
4799 if (icr & E1000_ICR_DOUTSYNC) {
4800 /* HW is reporting DMA is out of sync */
4801 adapter->stats.doosync++;
4802 }
4803
4804 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4805 hw->mac.get_link_status = 1;
4806 if (!test_bit(__IGB_DOWN, &adapter->state))
4807 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4808 }
4809
4810 napi_schedule(&q_vector->napi);
4811
4812 return IRQ_HANDLED;
4813 }
4814
4815 /**
4816 * igb_intr - Legacy Interrupt Handler
4817 * @irq: interrupt number
4818 * @data: pointer to a network interface device structure
4819 **/
4820 static irqreturn_t igb_intr(int irq, void *data)
4821 {
4822 struct igb_adapter *adapter = data;
4823 struct igb_q_vector *q_vector = adapter->q_vector[0];
4824 struct e1000_hw *hw = &adapter->hw;
4825 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4826 * need for the IMC write */
4827 u32 icr = rd32(E1000_ICR);
4828 if (!icr)
4829 return IRQ_NONE; /* Not our interrupt */
4830
4831 igb_write_itr(q_vector);
4832
4833 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4834 * not set, then the adapter didn't send an interrupt */
4835 if (!(icr & E1000_ICR_INT_ASSERTED))
4836 return IRQ_NONE;
4837
4838 if (icr & E1000_ICR_DRSTA)
4839 schedule_work(&adapter->reset_task);
4840
4841 if (icr & E1000_ICR_DOUTSYNC) {
4842 /* HW is reporting DMA is out of sync */
4843 adapter->stats.doosync++;
4844 }
4845
4846 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4847 hw->mac.get_link_status = 1;
4848 /* guard against interrupt when we're going down */
4849 if (!test_bit(__IGB_DOWN, &adapter->state))
4850 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4851 }
4852
4853 napi_schedule(&q_vector->napi);
4854
4855 return IRQ_HANDLED;
4856 }
4857
4858 static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
4859 {
4860 struct igb_adapter *adapter = q_vector->adapter;
4861 struct e1000_hw *hw = &adapter->hw;
4862
4863 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4864 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
4865 if (!adapter->msix_entries)
4866 igb_set_itr(adapter);
4867 else
4868 igb_update_ring_itr(q_vector);
4869 }
4870
4871 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4872 if (adapter->msix_entries)
4873 wr32(E1000_EIMS, q_vector->eims_value);
4874 else
4875 igb_irq_enable(adapter);
4876 }
4877 }
4878
4879 /**
4880 * igb_poll - NAPI Rx polling callback
4881 * @napi: napi polling structure
4882 * @budget: count of how many packets we should handle
4883 **/
4884 static int igb_poll(struct napi_struct *napi, int budget)
4885 {
4886 struct igb_q_vector *q_vector = container_of(napi,
4887 struct igb_q_vector,
4888 napi);
4889 int tx_clean_complete = 1, work_done = 0;
4890
4891 #ifdef CONFIG_IGB_DCA
4892 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4893 igb_update_dca(q_vector);
4894 #endif
4895 if (q_vector->tx_ring)
4896 tx_clean_complete = igb_clean_tx_irq(q_vector);
4897
4898 if (q_vector->rx_ring)
4899 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4900
4901 if (!tx_clean_complete)
4902 work_done = budget;
4903
4904 /* If not enough Rx work done, exit the polling mode */
4905 if (work_done < budget) {
4906 napi_complete(napi);
4907 igb_ring_irq_enable(q_vector);
4908 }
4909
4910 return work_done;
4911 }
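/*
 * NAPI contract recap (illustrative): returning less than "budget"
 * after napi_complete() tells the core this vector is idle again and
 * its interrupts have been re-enabled; returning the full budget keeps
 * the vector on the poll list. Registration in this driver's setup
 * path looks like the following, with the conventional weight of 64:
 *
 *	netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
 */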
4912
4913 /**
4914 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
4915 * @adapter: board private structure
4916 * @shhwtstamps: timestamp structure to update
4917 * @regval: unsigned 64bit system time value.
4918 *
4919 * We need to convert the system time value stored in the RX/TXSTMP registers
4920 * into a hwtstamp which can be used by the upper level timestamping functions
4921 */
4922 static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4923 struct skb_shared_hwtstamps *shhwtstamps,
4924 u64 regval)
4925 {
4926 u64 ns;
4927
4928 /*
4929 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL; shift this up to
4930 * 24 to match the clock shift we set up earlier.
4931 */
4932 if (adapter->hw.mac.type == e1000_82580)
4933 regval <<= IGB_82580_TSYNC_SHIFT;
4934
4935 ns = timecounter_cyc2time(&adapter->clock, regval);
4936 timecompare_update(&adapter->compare, ns);
4937 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4938 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4939 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4940 }
4941
4942 /**
4943 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4944 * @q_vector: pointer to q_vector containing needed info
4945 * @skb: packet that was just sent
4946 *
4947 * If we were asked to do hardware stamping and such a time stamp is
4948 * available, then it must have been for this skb here because we only
4949 * allow one such packet into the queue.
4950 */
4951 static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4952 {
4953 struct igb_adapter *adapter = q_vector->adapter;
4954 union skb_shared_tx *shtx = skb_tx(skb);
4955 struct e1000_hw *hw = &adapter->hw;
4956 struct skb_shared_hwtstamps shhwtstamps;
4957 u64 regval;
4958
4959 /* if skb does not support hw timestamp or TX stamp not valid exit */
4960 if (likely(!shtx->hardware) ||
4961 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4962 return;
4963
4964 regval = rd32(E1000_TXSTMPL);
4965 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4966
4967 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4968 skb_tstamp_tx(skb, &shhwtstamps);
4969 }
4970
4971 /**
4972 * igb_clean_tx_irq - Reclaim resources after transmit completes
4973 * @q_vector: pointer to q_vector containing needed info
4974 * returns true if ring is completely cleaned
4975 **/
4976 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4977 {
4978 struct igb_adapter *adapter = q_vector->adapter;
4979 struct igb_ring *tx_ring = q_vector->tx_ring;
4980 struct net_device *netdev = tx_ring->netdev;
4981 struct e1000_hw *hw = &adapter->hw;
4982 struct igb_buffer *buffer_info;
4983 struct sk_buff *skb;
4984 union e1000_adv_tx_desc *tx_desc, *eop_desc;
4985 unsigned int total_bytes = 0, total_packets = 0;
4986 unsigned int i, eop, count = 0;
4987 bool cleaned = false;
4988
4989 i = tx_ring->next_to_clean;
4990 eop = tx_ring->buffer_info[i].next_to_watch;
4991 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4992
4993 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
4994 (count < tx_ring->count)) {
4995 for (cleaned = false; !cleaned; count++) {
4996 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
4997 buffer_info = &tx_ring->buffer_info[i];
4998 cleaned = (i == eop);
4999 skb = buffer_info->skb;
5000
5001 if (skb) {
5002 unsigned int segs, bytecount;
5003 /* gso_segs is currently only valid for tcp */
5004 segs = skb_shinfo(skb)->gso_segs ?: 1;
5005 /* multiply data chunks by size of headers */
5006 bytecount = ((segs - 1) * skb_headlen(skb)) +
5007 skb->len;
5008 total_packets += segs;
5009 total_bytes += bytecount;
5010
5011 igb_tx_hwtstamp(q_vector, skb);
5012 }
5013
5014 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
5015 tx_desc->wb.status = 0;
5016
5017 i++;
5018 if (i == tx_ring->count)
5019 i = 0;
5020 }
5021 eop = tx_ring->buffer_info[i].next_to_watch;
5022 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
5023 }
5024
5025 tx_ring->next_to_clean = i;
5026
5027 if (unlikely(count &&
5028 netif_carrier_ok(netdev) &&
5029 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5030 /* Make sure that anybody stopping the queue after this
5031 * sees the new next_to_clean.
5032 */
5033 smp_mb();
5034 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5035 !(test_bit(__IGB_DOWN, &adapter->state))) {
5036 netif_wake_subqueue(netdev, tx_ring->queue_index);
5037 tx_ring->tx_stats.restart_queue++;
5038 }
5039 }
5040
5041 if (tx_ring->detect_tx_hung) {
5042 /* Detect a transmit hang in hardware; this serializes the
5043 * check with the clearing of time_stamp and movement of i */
5044 tx_ring->detect_tx_hung = false;
5045 if (tx_ring->buffer_info[i].time_stamp &&
5046 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
5047 (adapter->tx_timeout_factor * HZ)) &&
5048 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
5049
5050 /* detected Tx unit hang */
5051 dev_err(&tx_ring->pdev->dev,
5052 "Detected Tx Unit Hang\n"
5053 " Tx Queue <%d>\n"
5054 " TDH <%x>\n"
5055 " TDT <%x>\n"
5056 " next_to_use <%x>\n"
5057 " next_to_clean <%x>\n"
5058 "buffer_info[next_to_clean]\n"
5059 " time_stamp <%lx>\n"
5060 " next_to_watch <%x>\n"
5061 " jiffies <%lx>\n"
5062 " desc.status <%x>\n",
5063 tx_ring->queue_index,
5064 readl(tx_ring->head),
5065 readl(tx_ring->tail),
5066 tx_ring->next_to_use,
5067 tx_ring->next_to_clean,
5068 tx_ring->buffer_info[eop].time_stamp,
5069 eop,
5070 jiffies,
5071 eop_desc->wb.status);
5072 netif_stop_subqueue(netdev, tx_ring->queue_index);
5073 }
5074 }
5075 tx_ring->total_bytes += total_bytes;
5076 tx_ring->total_packets += total_packets;
5077 tx_ring->tx_stats.bytes += total_bytes;
5078 tx_ring->tx_stats.packets += total_packets;
5079 return (count < tx_ring->count);
5080 }
5081
5082 /**
5083 * igb_receive_skb - helper function to handle rx indications
5084 * @q_vector: structure containing interrupt and ring information
5085 * @skb: packet to send up
5086 * @vlan_tag: vlan tag for packet
5087 **/
5088 static void igb_receive_skb(struct igb_q_vector *q_vector,
5089 struct sk_buff *skb,
5090 u16 vlan_tag)
5091 {
5092 struct igb_adapter *adapter = q_vector->adapter;
5093
5094 if (vlan_tag)
5095 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
5096 vlan_tag, skb);
5097 else
5098 napi_gro_receive(&q_vector->napi, skb);
5099 }
5100
5101 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5102 u32 status_err, struct sk_buff *skb)
5103 {
5104 skb->ip_summed = CHECKSUM_NONE;
5105
5106 /* the Ignore Checksum (IXSM) bit is set, or checksum is disabled through ethtool */
5107 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
5108 (status_err & E1000_RXD_STAT_IXSM))
5109 return;
5110
5111 /* TCP/UDP checksum error bit is set */
5112 if (status_err &
5113 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
5114 /*
5115 * work around errata with sctp packets where the TCPE aka
5116 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5117 * packets (i.e. let the stack check the crc32c)
5118 */
5119 if ((skb->len == 60) &&
5120 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
5121 ring->rx_stats.csum_err++;
5122
5123 /* let the stack verify checksum errors */
5124 return;
5125 }
5126 /* It must be a TCP or UDP packet with a valid checksum */
5127 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5128 skb->ip_summed = CHECKSUM_UNNECESSARY;
5129
5130 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
5131 }
5132
5133 static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5134 struct sk_buff *skb)
5135 {
5136 struct igb_adapter *adapter = q_vector->adapter;
5137 struct e1000_hw *hw = &adapter->hw;
5138 u64 regval;
5139
5140 /*
5141 * If this bit is set, then the RX registers contain the time stamp. No
5142 * other packet will be time stamped until we read these registers, so
5143 * read the registers to make them available again. Because only one
5144 * packet can be time stamped at a time, we know that the register
5145 * values must belong to this one here and therefore we don't need to
5146 * compare any of the additional attributes stored for it.
5147 *
5148 * If nothing went wrong, then it should have a skb_shared_tx that we
5149 * can turn into a skb_shared_hwtstamps.
5150 */
5151 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
5152 return;
5153 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5154 return;
5155
5156 regval = rd32(E1000_RXSTMPL);
5157 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5158
5159 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5160 }
5161 static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
5162 union e1000_adv_rx_desc *rx_desc)
5163 {
5164 /* HW will not DMA in data larger than the given buffer, even if it
5165 * parses the (NFS, of course) header to be larger. In that case, it
5166 * fills the header buffer and spills the rest into the page.
5167 */
5168 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5169 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
5170 if (hlen > rx_ring->rx_buffer_len)
5171 hlen = rx_ring->rx_buffer_len;
5172 return hlen;
5173 }
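/* Illustrative example: with header split active the hardware might
 * report hlen = 54 for a plain TCP frame, so only the headers land in
 * skb->data and the payload is attached as a half-page fragment in
 * igb_clean_rx_irq_adv() below; the clamp above guards against
 * hdr_info advertising more than rx_buffer_len. */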
5174
5175 static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5176 int *work_done, int budget)
5177 {
5178 struct igb_ring *rx_ring = q_vector->rx_ring;
5179 struct net_device *netdev = rx_ring->netdev;
5180 struct pci_dev *pdev = rx_ring->pdev;
5181 union e1000_adv_rx_desc *rx_desc, *next_rxd;
5182 struct igb_buffer *buffer_info, *next_buffer;
5183 struct sk_buff *skb;
5184 bool cleaned = false;
5185 int cleaned_count = 0;
5186 int current_node = numa_node_id();
5187 unsigned int total_bytes = 0, total_packets = 0;
5188 unsigned int i;
5189 u32 staterr;
5190 u16 length;
5191 u16 vlan_tag;
5192
5193 i = rx_ring->next_to_clean;
5194 buffer_info = &rx_ring->buffer_info[i];
5195 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5196 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5197
5198 while (staterr & E1000_RXD_STAT_DD) {
5199 if (*work_done >= budget)
5200 break;
5201 (*work_done)++;
5202
5203 skb = buffer_info->skb;
5204 prefetch(skb->data - NET_IP_ALIGN);
5205 buffer_info->skb = NULL;
5206
5207 i++;
5208 if (i == rx_ring->count)
5209 i = 0;
5210
5211 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
5212 prefetch(next_rxd);
5213 next_buffer = &rx_ring->buffer_info[i];
5214
5215 length = le16_to_cpu(rx_desc->wb.upper.length);
5216 cleaned = true;
5217 cleaned_count++;
5218
5219 if (buffer_info->dma) {
5220 pci_unmap_single(pdev, buffer_info->dma,
5221 rx_ring->rx_buffer_len,
5222 PCI_DMA_FROMDEVICE);
5223 buffer_info->dma = 0;
5224 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
5225 skb_put(skb, length);
5226 goto send_up;
5227 }
5228 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
5229 }
5230
5231 if (length) {
5232 pci_unmap_page(pdev, buffer_info->page_dma,
5233 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
5234 buffer_info->page_dma = 0;
5235
5236 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
5237 buffer_info->page,
5238 buffer_info->page_offset,
5239 length);
5240
5241 if ((page_count(buffer_info->page) != 1) ||
5242 (page_to_nid(buffer_info->page) != current_node))
5243 buffer_info->page = NULL;
5244 else
5245 get_page(buffer_info->page);
5246
5247 skb->len += length;
5248 skb->data_len += length;
5249 skb->truesize += length;
5250 }
5251
5252 if (!(staterr & E1000_RXD_STAT_EOP)) {
5253 buffer_info->skb = next_buffer->skb;
5254 buffer_info->dma = next_buffer->dma;
5255 next_buffer->skb = skb;
5256 next_buffer->dma = 0;
5257 goto next_desc;
5258 }
5259 send_up:
5260 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
5261 dev_kfree_skb_irq(skb);
5262 goto next_desc;
5263 }
5264
5265 igb_rx_hwtstamp(q_vector, staterr, skb);
5266 total_bytes += skb->len;
5267 total_packets++;
5268
5269 igb_rx_checksum_adv(rx_ring, staterr, skb);
5270
5271 skb->protocol = eth_type_trans(skb, netdev);
5272 skb_record_rx_queue(skb, rx_ring->queue_index);
5273
5274 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5275 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5276
5277 igb_receive_skb(q_vector, skb, vlan_tag);
5278
5279 next_desc:
5280 rx_desc->wb.upper.status_error = 0;
5281
5282 /* return some buffers to hardware, one at a time is too slow */
5283 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
5284 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
5285 cleaned_count = 0;
5286 }
5287
5288 /* use prefetched values */
5289 rx_desc = next_rxd;
5290 buffer_info = next_buffer;
5291 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5292 }
5293
5294 rx_ring->next_to_clean = i;
5295 cleaned_count = igb_desc_unused(rx_ring);
5296
5297 if (cleaned_count)
5298 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
5299
5300 rx_ring->total_packets += total_packets;
5301 rx_ring->total_bytes += total_bytes;
5302 rx_ring->rx_stats.packets += total_packets;
5303 rx_ring->rx_stats.bytes += total_bytes;
5304 return cleaned;
5305 }
5306
5307 /**
5308 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
5309 * @adapter: address of board private structure
5310 **/
5311 void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5312 {
5313 struct net_device *netdev = rx_ring->netdev;
5314 union e1000_adv_rx_desc *rx_desc;
5315 struct igb_buffer *buffer_info;
5316 struct sk_buff *skb;
5317 unsigned int i;
5318 int bufsz;
5319
5320 i = rx_ring->next_to_use;
5321 buffer_info = &rx_ring->buffer_info[i];
5322
5323 bufsz = rx_ring->rx_buffer_len;
5324
5325 while (cleaned_count--) {
5326 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
5327
5328 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
5329 if (!buffer_info->page) {
5330 buffer_info->page = netdev_alloc_page(netdev);
5331 if (!buffer_info->page) {
5332 rx_ring->rx_stats.alloc_failed++;
5333 goto no_buffers;
5334 }
5335 buffer_info->page_offset = 0;
5336 } else {
5337 buffer_info->page_offset ^= PAGE_SIZE / 2;
5338 }
5339 buffer_info->page_dma =
5340 pci_map_page(rx_ring->pdev, buffer_info->page,
5341 buffer_info->page_offset,
5342 PAGE_SIZE / 2,
5343 PCI_DMA_FROMDEVICE);
5344 if (pci_dma_mapping_error(rx_ring->pdev,
5345 buffer_info->page_dma)) {
5346 buffer_info->page_dma = 0;
5347 rx_ring->rx_stats.alloc_failed++;
5348 goto no_buffers;
5349 }
5350 }
5351
5352 skb = buffer_info->skb;
5353 if (!skb) {
5354 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
5355 if (!skb) {
5356 rx_ring->rx_stats.alloc_failed++;
5357 goto no_buffers;
5358 }
5359
5360 buffer_info->skb = skb;
5361 }
5362 if (!buffer_info->dma) {
5363 buffer_info->dma = pci_map_single(rx_ring->pdev,
5364 skb->data,
5365 bufsz,
5366 PCI_DMA_FROMDEVICE);
5367 if (pci_dma_mapping_error(rx_ring->pdev,
5368 buffer_info->dma)) {
5369 buffer_info->dma = 0;
5370 rx_ring->rx_stats.alloc_failed++;
5371 goto no_buffers;
5372 }
5373 }
5374 /* Refresh the desc even if buffer_addrs didn't change because
5375 * each write-back erases this info. */
5376 if (bufsz < IGB_RXBUFFER_1024) {
5377 rx_desc->read.pkt_addr =
5378 cpu_to_le64(buffer_info->page_dma);
5379 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
5380 } else {
5381 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
5382 rx_desc->read.hdr_addr = 0;
5383 }
5384
5385 i++;
5386 if (i == rx_ring->count)
5387 i = 0;
5388 buffer_info = &rx_ring->buffer_info[i];
5389 }
5390
5391 no_buffers:
5392 if (rx_ring->next_to_use != i) {
5393 rx_ring->next_to_use = i;
5394 if (i == 0)
5395 i = (rx_ring->count - 1);
5396 else
5397 i--;
5398
5399 /* Force memory writes to complete before letting h/w
5400 * know there are new descriptors to fetch. (Only
5401 * applicable for weak-ordered memory model archs,
5402 * such as IA-64). */
5403 wmb();
5404 writel(i, rx_ring->tail);
5405 }
5406 }
5407
5408 /**
5409 * igb_mii_ioctl - handle MII ioctls for the copper PHY
5410 * @netdev: network interface device structure
5411 * @ifr: pointer to the ifreq structure holding the MII data
5412 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
5413 **/
5414 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5415 {
5416 struct igb_adapter *adapter = netdev_priv(netdev);
5417 struct mii_ioctl_data *data = if_mii(ifr);
5418
5419 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5420 return -EOPNOTSUPP;
5421
5422 switch (cmd) {
5423 case SIOCGMIIPHY:
5424 data->phy_id = adapter->hw.phy.addr;
5425 break;
5426 case SIOCGMIIREG:
5427 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
5428 &data->val_out))
5429 return -EIO;
5430 break;
5431 case SIOCSMIIREG:
5432 default:
5433 return -EOPNOTSUPP;
5434 }
5435 return 0;
5436 }
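/*
 * Userspace sketch for the time stamping ioctl handled below (assumes
 * the standard SIOCSHWTSTAMP interface; "eth0" and fd are placeholders
 * and error handling is omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that igb_hwtstamp_ioctl() may rewrite rx_filter (e.g. to
 * HWTSTAMP_FILTER_ALL or HWTSTAMP_FILTER_SOME) to reflect what the
 * hardware actually programmed.
 */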
5437
5438 /**
5439 * igb_hwtstamp_ioctl - control hardware time stamping
5440 * @netdev: network interface device structure
5441 * @ifr: pointer to the ifreq carrying a struct hwtstamp_config
5442 * @cmd: ioctl command (SIOCSHWTSTAMP)
5443 *
5444 * Outgoing time stamping can be enabled and disabled. Play nice and
5445 * disable it when requested, although it shouldn't cause any overhead
5446 * when no packet needs it. At most one packet in the queue may be
5447 * marked for time stamping, otherwise it would be impossible to tell
5448 * for sure to which packet the hardware time stamp belongs.
5449 *
5450 * Incoming time stamping has to be configured via the hardware
5451 * filters. Not all combinations are supported, in particular event
5452 * type has to be specified. Matching the kind of event packet is
5453 * not supported, with the exception of "all V2 events regardless of
5454 * level 2 or 4".
5455 *
5456 **/
5457 static int igb_hwtstamp_ioctl(struct net_device *netdev,
5458 struct ifreq *ifr, int cmd)
5459 {
5460 struct igb_adapter *adapter = netdev_priv(netdev);
5461 struct e1000_hw *hw = &adapter->hw;
5462 struct hwtstamp_config config;
5463 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
5464 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
5465 u32 tsync_rx_cfg = 0;
5466 bool is_l4 = false;
5467 bool is_l2 = false;
5468 u32 regval;
5469
5470 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5471 return -EFAULT;
5472
5473 /* reserved for future extensions */
5474 if (config.flags)
5475 return -EINVAL;
5476
5477 switch (config.tx_type) {
5478 case HWTSTAMP_TX_OFF:
5479 tsync_tx_ctl = 0;
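/* fall through - OFF shares the register write path with ON */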
5480 case HWTSTAMP_TX_ON:
5481 break;
5482 default:
5483 return -ERANGE;
5484 }
5485
5486 switch (config.rx_filter) {
5487 case HWTSTAMP_FILTER_NONE:
5488 tsync_rx_ctl = 0;
5489 break;
5490 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
5491 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
5492 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
5493 case HWTSTAMP_FILTER_ALL:
5494 /*
5495 * register TSYNCRXCFG must be set, therefore it is not
5496 * possible to time stamp both Sync and Delay_Req messages
5497 * => fall back to time stamping all packets
5498 */
5499 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
5500 config.rx_filter = HWTSTAMP_FILTER_ALL;
5501 break;
5502 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
5503 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
5504 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
5505 is_l4 = true;
5506 break;
5507 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
5508 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
5509 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
5510 is_l4 = true;
5511 break;
5512 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5513 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5514 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5515 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
5516 is_l2 = true;
5517 is_l4 = true;
5518 config.rx_filter = HWTSTAMP_FILTER_SOME;
5519 break;
5520 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5521 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5522 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5523 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
5524 is_l2 = true;
5525 is_l4 = true;
5526 config.rx_filter = HWTSTAMP_FILTER_SOME;
5527 break;
5528 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5529 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5530 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5531 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
5532 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
5533 is_l2 = true;
5534 break;
5535 default:
5536 return -ERANGE;
5537 }
5538
5539 if (hw->mac.type == e1000_82575) {
5540 if (tsync_rx_ctl || tsync_tx_ctl)
5541 return -EINVAL;
5542 return 0;
5543 }
5544
5545 /* enable/disable TX */
5546 regval = rd32(E1000_TSYNCTXCTL);
5547 regval &= ~E1000_TSYNCTXCTL_ENABLED;
5548 regval |= tsync_tx_ctl;
5549 wr32(E1000_TSYNCTXCTL, regval);
5550
5551 /* enable/disable RX */
5552 regval = rd32(E1000_TSYNCRXCTL);
5553 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5554 regval |= tsync_rx_ctl;
5555 wr32(E1000_TSYNCRXCTL, regval);
5556
5557 /* define which PTP packets are time stamped */
5558 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5559
5560 /* define ethertype filter for timestamped packets */
5561 if (is_l2)
5562 wr32(E1000_ETQF(3),
5563 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5564 E1000_ETQF_1588 | /* enable timestamping */
5565 ETH_P_1588)); /* 1588 eth protocol type */
5566 else
5567 wr32(E1000_ETQF(3), 0);
5568
5569 #define PTP_PORT 319
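/* UDP destination port 319 carries PTP event messages (Sync and
 * Delay_Req); PTP general messages use port 320 and need no stamping */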
5570 /* L4 Queue Filter[3]: filter by destination port and protocol */
5571 if (is_l4) {
5572 u32 ftqf = (IPPROTO_UDP /* UDP */
5573 | E1000_FTQF_VF_BP /* VF not compared */
5574 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5575 | E1000_FTQF_MASK); /* mask all inputs */
5576 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
5577
5578 wr32(E1000_IMIR(3), htons(PTP_PORT));
5579 wr32(E1000_IMIREXT(3),
5580 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5581 if (hw->mac.type == e1000_82576) {
5582 /* enable source port check */
5583 wr32(E1000_SPQF(3), htons(PTP_PORT));
5584 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
5585 }
5586 wr32(E1000_FTQF(3), ftqf);
5587 } else {
5588 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
5589 }
5590 wrfl();
5591
5592 adapter->hwtstamp_config = config;
5593
5594 /* clear TX/RX time stamp registers, just to be sure */
5595 regval = rd32(E1000_TXSTMPH);
5596 regval = rd32(E1000_RXSTMPH);
5597
5598 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
5599 -EFAULT : 0;
5600 }
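/*
 * For illustration only (not part of the driver): userspace would
 * typically enable time stamping through this ioctl roughly as
 * sketched below, with "eth0" as an assumed interface name:
 *
 *	struct hwtstamp_config cfg;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success cfg holds the configuration actually programmed, which
 * may be broader than requested (see the rx_filter fallbacks above).
 */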
5601
5602 /**
5603  * igb_ioctl - dispatch device-specific ioctl commands
5604  * @netdev: network interface device structure
5605  * @ifr: interface request structure for the ioctl
5606  * @cmd: ioctl command to execute
5607  **/
5608 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5609 {
5610 switch (cmd) {
5611 case SIOCGMIIPHY:
5612 case SIOCGMIIREG:
5613 case SIOCSMIIREG:
5614 return igb_mii_ioctl(netdev, ifr, cmd);
5615 case SIOCSHWTSTAMP:
5616 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
5617 default:
5618 return -EOPNOTSUPP;
5619 }
5620 }
5621
5622 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5623 {
5624 struct igb_adapter *adapter = hw->back;
5625 u16 cap_offset;
5626
5627 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5628 if (!cap_offset)
5629 return -E1000_ERR_CONFIG;
5630
5631 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
5632
5633 return 0;
5634 }
5635
5636 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
5637 {
5638 struct igb_adapter *adapter = hw->back;
5639 u16 cap_offset;
5640
5641 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
5642 if (!cap_offset)
5643 return -E1000_ERR_CONFIG;
5644
5645 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
5646
5647 return 0;
5648 }
5649
5650 static void igb_vlan_rx_register(struct net_device *netdev,
5651 struct vlan_group *grp)
5652 {
5653 struct igb_adapter *adapter = netdev_priv(netdev);
5654 struct e1000_hw *hw = &adapter->hw;
5655 u32 ctrl, rctl;
5656
5657 igb_irq_disable(adapter);
5658 adapter->vlgrp = grp;
5659
5660 if (grp) {
5661 /* enable VLAN tag insert/strip */
5662 ctrl = rd32(E1000_CTRL);
5663 ctrl |= E1000_CTRL_VME;
5664 wr32(E1000_CTRL, ctrl);
5665
5666 /* Disable CFI check */
5667 rctl = rd32(E1000_RCTL);
5668 rctl &= ~E1000_RCTL_CFIEN;
5669 wr32(E1000_RCTL, rctl);
5670 } else {
5671 /* disable VLAN tag insert/strip */
5672 ctrl = rd32(E1000_CTRL);
5673 ctrl &= ~E1000_CTRL_VME;
5674 wr32(E1000_CTRL, ctrl);
5675 }
5676
5677 igb_rlpml_set(adapter);
5678
5679 if (!test_bit(__IGB_DOWN, &adapter->state))
5680 igb_irq_enable(adapter);
5681 }
5682
5683 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5684 {
5685 struct igb_adapter *adapter = netdev_priv(netdev);
5686 struct e1000_hw *hw = &adapter->hw;
5687 int pf_id = adapter->vfs_allocated_count;
5688
5689 /* attempt to add filter to vlvf array */
5690 igb_vlvf_set(adapter, vid, true, pf_id);
5691
5692 /* add the filter since PF can receive vlans w/o entry in vlvf */
5693 igb_vfta_set(hw, vid, true);
5694 }
5695
5696 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5697 {
5698 struct igb_adapter *adapter = netdev_priv(netdev);
5699 struct e1000_hw *hw = &adapter->hw;
5700 int pf_id = adapter->vfs_allocated_count;
5701 s32 err;
5702
5703 igb_irq_disable(adapter);
5704 vlan_group_set_device(adapter->vlgrp, vid, NULL);
5705
5706 if (!test_bit(__IGB_DOWN, &adapter->state))
5707 igb_irq_enable(adapter);
5708
5709 /* remove vlan from VLVF table array */
5710 err = igb_vlvf_set(adapter, vid, false, pf_id);
5711
5712 /* if vid was not present in VLVF just remove it from table */
5713 if (err)
5714 igb_vfta_set(hw, vid, false);
5715 }
5716
5717 static void igb_restore_vlan(struct igb_adapter *adapter)
5718 {
5719 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5720
5721 if (adapter->vlgrp) {
5722 u16 vid;
5723 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5724 if (!vlan_group_get_device(adapter->vlgrp, vid))
5725 continue;
5726 igb_vlan_rx_add_vid(adapter->netdev, vid);
5727 }
5728 }
5729 }
5730
5731 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5732 {
5733 struct pci_dev *pdev = adapter->pdev;
5734 struct e1000_mac_info *mac = &adapter->hw.mac;
5735
5736 mac->autoneg = 0;
5737
5738 switch (spddplx) {
5739 case SPEED_10 + DUPLEX_HALF:
5740 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5741 break;
5742 case SPEED_10 + DUPLEX_FULL:
5743 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5744 break;
5745 case SPEED_100 + DUPLEX_HALF:
5746 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5747 break;
5748 case SPEED_100 + DUPLEX_FULL:
5749 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5750 break;
5751 case SPEED_1000 + DUPLEX_FULL:
5752 mac->autoneg = 1;
5753 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5754 break;
5755 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5756 default:
5757 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
5758 return -EINVAL;
5759 }
5760 return 0;
5761 }
5762
5763 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5764 {
5765 struct net_device *netdev = pci_get_drvdata(pdev);
5766 struct igb_adapter *adapter = netdev_priv(netdev);
5767 struct e1000_hw *hw = &adapter->hw;
5768 u32 ctrl, rctl, status;
5769 u32 wufc = adapter->wol;
5770 #ifdef CONFIG_PM
5771 int retval = 0;
5772 #endif
5773
5774 netif_device_detach(netdev);
5775
5776 if (netif_running(netdev))
5777 igb_close(netdev);
5778
5779 igb_clear_interrupt_scheme(adapter);
5780
5781 #ifdef CONFIG_PM
5782 retval = pci_save_state(pdev);
5783 if (retval)
5784 return retval;
5785 #endif
5786
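/* drop the link-status-change wake filter when the link is already up */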
5787 status = rd32(E1000_STATUS);
5788 if (status & E1000_STATUS_LU)
5789 wufc &= ~E1000_WUFC_LNKC;
5790
5791 if (wufc) {
5792 igb_setup_rctl(adapter);
5793 igb_set_rx_mode(netdev);
5794
5795 /* turn on all-multi mode if wake on multicast is enabled */
5796 if (wufc & E1000_WUFC_MC) {
5797 rctl = rd32(E1000_RCTL);
5798 rctl |= E1000_RCTL_MPE;
5799 wr32(E1000_RCTL, rctl);
5800 }
5801
5802 ctrl = rd32(E1000_CTRL);
5803 /* advertise wake from D3Cold */
5804 #define E1000_CTRL_ADVD3WUC 0x00100000
5805 /* phy power management enable */
5806 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5807 ctrl |= E1000_CTRL_ADVD3WUC;
5808 wr32(E1000_CTRL, ctrl);
5809
5810 /* Allow time for pending master requests to run */
5811 igb_disable_pcie_master(hw);
5812
5813 wr32(E1000_WUC, E1000_WUC_PME_EN);
5814 wr32(E1000_WUFC, wufc);
5815 } else {
5816 wr32(E1000_WUC, 0);
5817 wr32(E1000_WUFC, 0);
5818 }
5819
5820 *enable_wake = wufc || adapter->en_mng_pt;
5821 if (!*enable_wake)
5822 igb_shutdown_serdes_link_82575(hw);
5823
5824 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5825 * would have already happened in close and is redundant. */
5826 igb_release_hw_control(adapter);
5827
5828 pci_disable_device(pdev);
5829
5830 return 0;
5831 }
5832
5833 #ifdef CONFIG_PM
5834 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
5835 {
5836 int retval;
5837 bool wake;
5838
5839 retval = __igb_shutdown(pdev, &wake);
5840 if (retval)
5841 return retval;
5842
5843 if (wake) {
5844 pci_prepare_to_sleep(pdev);
5845 } else {
5846 pci_wake_from_d3(pdev, false);
5847 pci_set_power_state(pdev, PCI_D3hot);
5848 }
5849
5850 return 0;
5851 }
5852
5853 static int igb_resume(struct pci_dev *pdev)
5854 {
5855 struct net_device *netdev = pci_get_drvdata(pdev);
5856 struct igb_adapter *adapter = netdev_priv(netdev);
5857 struct e1000_hw *hw = &adapter->hw;
5858 u32 err;
5859
5860 pci_set_power_state(pdev, PCI_D0);
5861 pci_restore_state(pdev);
5862
5863 err = pci_enable_device_mem(pdev);
5864 if (err) {
5865 dev_err(&pdev->dev,
5866 "igb: Cannot enable PCI device from suspend\n");
5867 return err;
5868 }
5869 pci_set_master(pdev);
5870
5871 pci_enable_wake(pdev, PCI_D3hot, 0);
5872 pci_enable_wake(pdev, PCI_D3cold, 0);
5873
5874 if (igb_init_interrupt_scheme(adapter)) {
5875 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5876 return -ENOMEM;
5877 }
5878
5879 /* e1000_power_up_phy(adapter); */
5880
5881 igb_reset(adapter);
5882
5883 /* let the f/w know that the h/w is now under the control of the
5884 * driver. */
5885 igb_get_hw_control(adapter);
5886
5887 wr32(E1000_WUS, ~0);
5888
5889 if (netif_running(netdev)) {
5890 err = igb_open(netdev);
5891 if (err)
5892 return err;
5893 }
5894
5895 netif_device_attach(netdev);
5896
5897 return 0;
5898 }
5899 #endif
5900
5901 static void igb_shutdown(struct pci_dev *pdev)
5902 {
5903 bool wake;
5904
5905 __igb_shutdown(pdev, &wake);
5906
5907 if (system_state == SYSTEM_POWER_OFF) {
5908 pci_wake_from_d3(pdev, wake);
5909 pci_set_power_state(pdev, PCI_D3hot);
5910 }
5911 }
5912
5913 #ifdef CONFIG_NET_POLL_CONTROLLER
5914 /*
5915 * Polling 'interrupt' - used by things like netconsole to send skbs
5916 * without having to re-enable interrupts. It's not called while
5917 * the interrupt routine is executing.
5918 */
5919 static void igb_netpoll(struct net_device *netdev)
5920 {
5921 struct igb_adapter *adapter = netdev_priv(netdev);
5922 struct e1000_hw *hw = &adapter->hw;
5923 int i;
5924
5925 if (!adapter->msix_entries) {
5926 struct igb_q_vector *q_vector = adapter->q_vector[0];
5927 igb_irq_disable(adapter);
5928 napi_schedule(&q_vector->napi);
5929 return;
5930 }
5931
5932 for (i = 0; i < adapter->num_q_vectors; i++) {
5933 struct igb_q_vector *q_vector = adapter->q_vector[i];
5934 wr32(E1000_EIMC, q_vector->eims_value);
5935 napi_schedule(&q_vector->napi);
5936 }
5937 }
5938 #endif /* CONFIG_NET_POLL_CONTROLLER */
5939
5940 /**
5941 * igb_io_error_detected - called when PCI error is detected
5942 * @pdev: Pointer to PCI device
5943 * @state: The current pci connection state
5944 *
5945 * This function is called after a PCI bus error affecting
5946 * this device has been detected.
5947 */
5948 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
5949 pci_channel_state_t state)
5950 {
5951 struct net_device *netdev = pci_get_drvdata(pdev);
5952 struct igb_adapter *adapter = netdev_priv(netdev);
5953
5954 netif_device_detach(netdev);
5955
5956 if (state == pci_channel_io_perm_failure)
5957 return PCI_ERS_RESULT_DISCONNECT;
5958
5959 if (netif_running(netdev))
5960 igb_down(adapter);
5961 pci_disable_device(pdev);
5962
5963 /* Request a slot reset. */
5964 return PCI_ERS_RESULT_NEED_RESET;
5965 }
5966
5967 /**
5968 * igb_io_slot_reset - called after the pci bus has been reset.
5969 * @pdev: Pointer to PCI device
5970 *
5971 * Restart the card from scratch, as if from a cold-boot. Implementation
5972 * resembles the first-half of the igb_resume routine.
5973 */
5974 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
5975 {
5976 struct net_device *netdev = pci_get_drvdata(pdev);
5977 struct igb_adapter *adapter = netdev_priv(netdev);
5978 struct e1000_hw *hw = &adapter->hw;
5979 pci_ers_result_t result;
5980 int err;
5981
5982 if (pci_enable_device_mem(pdev)) {
5983 dev_err(&pdev->dev,
5984 "Cannot re-enable PCI device after reset.\n");
5985 result = PCI_ERS_RESULT_DISCONNECT;
5986 } else {
5987 pci_set_master(pdev);
5988 pci_restore_state(pdev);
5989
5990 pci_enable_wake(pdev, PCI_D3hot, 0);
5991 pci_enable_wake(pdev, PCI_D3cold, 0);
5992
5993 igb_reset(adapter);
5994 wr32(E1000_WUS, ~0);
5995 result = PCI_ERS_RESULT_RECOVERED;
5996 }
5997
5998 err = pci_cleanup_aer_uncorrect_error_status(pdev);
5999 if (err) {
6000 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6001 "failed 0x%x\n", err);
6002 /* non-fatal, continue */
6003 }
6004
6005 return result;
6006 }
6007
6008 /**
6009 * igb_io_resume - called when traffic can start flowing again.
6010 * @pdev: Pointer to PCI device
6011 *
6012 * This callback is called when the error recovery driver tells us that
6013 * its OK to resume normal operation. Implementation resembles the
6014 * second-half of the igb_resume routine.
6015 */
6016 static void igb_io_resume(struct pci_dev *pdev)
6017 {
6018 struct net_device *netdev = pci_get_drvdata(pdev);
6019 struct igb_adapter *adapter = netdev_priv(netdev);
6020
6021 if (netif_running(netdev)) {
6022 if (igb_up(adapter)) {
6023 dev_err(&pdev->dev, "igb_up failed after reset\n");
6024 return;
6025 }
6026 }
6027
6028 netif_device_attach(netdev);
6029
6030 /* let the f/w know that the h/w is now under the control of the
6031 * driver. */
6032 igb_get_hw_control(adapter);
6033 }
6034
6035 static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6036 u8 qsel)
6037 {
6038 u32 rar_low, rar_high;
6039 struct e1000_hw *hw = &adapter->hw;
6040
6041 /* HW expects these in little endian so we reverse the byte order
6042 * from network order (big endian) to little endian
6043 */
6044 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6045 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6046 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6047
6048 /* Indicate to hardware the Address is Valid. */
6049 rar_high |= E1000_RAH_AV;
6050
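/* select the output pool/queue: the 82575 stores qsel as a value in
 * the RAH pool field, later parts treat the field as a per-pool bitmask */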
6051 if (hw->mac.type == e1000_82575)
6052 rar_high |= E1000_RAH_POOL_1 * qsel;
6053 else
6054 rar_high |= E1000_RAH_POOL_1 << qsel;
6055
6056 wr32(E1000_RAL(index), rar_low);
6057 wrfl();
6058 wr32(E1000_RAH(index), rar_high);
6059 wrfl();
6060 }
6061
6062 static int igb_set_vf_mac(struct igb_adapter *adapter,
6063 int vf, unsigned char *mac_addr)
6064 {
6065 struct e1000_hw *hw = &adapter->hw;
6066 /* VF MAC addresses start at the end of the receive addresses and move
6067  * towards the first; as a result a collision should not be possible */
6068 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
6069
6070 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
6071
6072 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
6073
6074 return 0;
6075 }
6076
6077 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6078 {
6079 struct igb_adapter *adapter = netdev_priv(netdev);
6080 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6081 return -EINVAL;
6082 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6083 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6084 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6085 " change effective.\n");
6086 if (test_bit(__IGB_DOWN, &adapter->state)) {
6087 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6088 " but the PF device is not up.\n");
6089 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6090 " attempting to use the VF device.\n");
6091 }
6092 return igb_set_vf_mac(adapter, vf, mac);
6093 }
6094
6095 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6096 {
6097 return -EOPNOTSUPP;
6098 }
6099
6100 static int igb_ndo_get_vf_config(struct net_device *netdev,
6101 int vf, struct ifla_vf_info *ivi)
6102 {
6103 struct igb_adapter *adapter = netdev_priv(netdev);
6104 if (vf >= adapter->vfs_allocated_count)
6105 return -EINVAL;
6106 ivi->vf = vf;
6107 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
6108 ivi->tx_rate = 0;
6109 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6110 ivi->qos = adapter->vf_data[vf].pf_qos;
6111 return 0;
6112 }
6113
6114 static void igb_vmm_control(struct igb_adapter *adapter)
6115 {
6116 struct e1000_hw *hw = &adapter->hw;
6117 u32 reg;
6118
6119 /* replication is not supported for 82575 */
6120 if (hw->mac.type == e1000_82575)
6121 return;
6122
6123 /* enable replication vlan tag stripping */
6124 reg = rd32(E1000_RPLOLR);
6125 reg |= E1000_RPLOLR_STRVLAN;
6126 wr32(E1000_RPLOLR, reg);
6127
6128 /* notify HW that the MAC is adding vlan tags */
6129 reg = rd32(E1000_DTXCTL);
6130 reg |= E1000_DTXCTL_VLAN_ADDED;
6131 wr32(E1000_DTXCTL, reg);
6132
6133 if (adapter->vfs_allocated_count) {
6134 igb_vmdq_set_loopback_pf(hw, true);
6135 igb_vmdq_set_replication_pf(hw, true);
6136 } else {
6137 igb_vmdq_set_loopback_pf(hw, false);
6138 igb_vmdq_set_replication_pf(hw, false);
6139 }
6140 }
6141
6142 /* igb_main.c */