/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"

#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
        [LOOPBACK_NONE]         = "NONE",
        [LOOPBACK_DATA]         = "DATAPATH",
        [LOOPBACK_GMAC]         = "GMAC",
        [LOOPBACK_XGMII]        = "XGMII",
        [LOOPBACK_XGXS]         = "XGXS",
        [LOOPBACK_XAUI]         = "XAUI",
        [LOOPBACK_GMII]         = "GMII",
        [LOOPBACK_SGMII]        = "SGMII",
        [LOOPBACK_XGBR]         = "XGBR",
        [LOOPBACK_XFI]          = "XFI",
        [LOOPBACK_XAUI_FAR]     = "XAUI_FAR",
        [LOOPBACK_GMII_FAR]     = "GMII_FAR",
        [LOOPBACK_SGMII_FAR]    = "SGMII_FAR",
        [LOOPBACK_XFI_FAR]      = "XFI_FAR",
        [LOOPBACK_GPHY]         = "GPHY",
        [LOOPBACK_PHYXS]        = "PHYXS",
        [LOOPBACK_PCS]          = "PCS",
        [LOOPBACK_PMAPMD]       = "PMA/PMD",
        [LOOPBACK_XPORT]        = "XPORT",
        [LOOPBACK_XGMII_WS]     = "XGMII_WS",
        [LOOPBACK_XAUI_WS]      = "XAUI_WS",
        [LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
        [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
        [LOOPBACK_GMII_WS]      = "GMII_WS",
        [LOOPBACK_XFI_WS]       = "XFI_WS",
        [LOOPBACK_XFI_WS_FAR]   = "XFI_WS_FAR",
        [LOOPBACK_PHYXS_WS]     = "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
        [RESET_TYPE_INVISIBLE]          = "INVISIBLE",
        [RESET_TYPE_ALL]                = "ALL",
        [RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
        [RESET_TYPE_WORLD]              = "WORLD",
        [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
        [RESET_TYPE_DISABLE]            = "DISABLE",
        [RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
        [RESET_TYPE_INT_ERROR]          = "INT_ERROR",
        [RESET_TYPE_RX_RECOVERY]        = "RX_RECOVERY",
        [RESET_TYPE_RX_DESC_FETCH]      = "RX_DESC_FETCH",
        [RESET_TYPE_TX_DESC_FETCH]      = "TX_DESC_FETCH",
        [RESET_TYPE_TX_SKIP]            = "TX_SKIP",
        [RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static bool separate_tx_channels;
module_param(separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(separate_tx_channels,
                 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for POWER systems with EEH support, this will give
 * EEH a chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
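
/* Working through that figure (a sketch, assuming full-size frames on a
 * 10G link): half of a 1024-entry ring is 512 descriptors, or roughly
 * 512 / 3 ~= 170 packets at the worst-case 3 descriptors per packet.
 * A 1500-byte frame takes about 1.2 usec on the wire at 10Gb/s, so the
 * queue drains in roughly 170 * 1.2 ~= 205 usec; a 150 usec moderation
 * delay therefore lets the completion IRQ arrive before the link idles.
 */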

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
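
/* Illustrative usage, assuming this driver is packaged as the "sfc"
 * module: the 0444 parameters are set at load time, e.g.
 *     modprobe sfc rss_cpus=4 separate_tx_channels=1
 * while 0644 parameters such as phy_flash_cfg can also be changed at
 * runtime through /sys/module/sfc/parameters/.
 */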

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
                 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
                 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                         NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
                         NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
                         NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
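
/* For example: passing debug=0 at load time silences the driver
 * entirely, while a value such as debug=0xffff enables every
 * NETIF_MSG_* message class rather than just the default set above.
 */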

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)                \
        do {                                            \
                if ((efx->state == STATE_READY) ||      \
                    (efx->state == STATE_RECOVERY) ||   \
                    (efx->state == STATE_DISABLED))     \
                        ASSERT_RTNL();                  \
        } while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
        if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
                netif_err(efx, drv, efx->net_dev,
                          "device is disabled due to earlier errors\n");
                return -EIO;
        }
        return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
        int spent;

        if (unlikely(!channel->enabled))
                return 0;

        spent = efx_nic_process_eventq(channel, budget);
        if (spent && efx_channel_has_rx_queue(channel)) {
                struct efx_rx_queue *rx_queue =
                        efx_channel_get_rx_queue(channel);

                efx_rx_flush_packet(channel);
                if (rx_queue->enabled)
                        efx_fast_push_rx_descriptors(rx_queue);
        }

        return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
        /* The interrupt handler for this channel may set work_pending
         * as soon as we acknowledge the events we've seen. Make sure
         * it's cleared before then. */
        channel->work_pending = false;
        smp_wmb();

        efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
        struct efx_channel *channel =
                container_of(napi, struct efx_channel, napi_str);
        struct efx_nic *efx = channel->efx;
        int spent;

        netif_vdbg(efx, intr, efx->net_dev,
                   "channel %d NAPI poll executing on CPU %d\n",
                   channel->channel, raw_smp_processor_id());

        spent = efx_process_channel(channel, budget);

        if (spent < budget) {
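                /* Adaptive IRQ moderation: once every 1000 interrupts
                 * on an RX channel, compare the accumulated moderation
                 * score against the low/high thresholds and step the
                 * per-channel hold-off down or up by one tick.
                 */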
                if (efx_channel_has_rx_queue(channel) &&
                    efx->irq_rx_adaptive &&
                    unlikely(++channel->irq_count == 1000)) {
                        if (unlikely(channel->irq_mod_score <
                                     irq_adapt_low_thresh)) {
                                if (channel->irq_moderation > 1) {
                                        channel->irq_moderation -= 1;
                                        efx->type->push_irq_moderation(channel);
                                }
                        } else if (unlikely(channel->irq_mod_score >
                                            irq_adapt_high_thresh)) {
                                if (channel->irq_moderation <
                                    efx->irq_rx_moderation) {
                                        channel->irq_moderation += 1;
                                        efx->type->push_irq_moderation(channel);
                                }
                        }
                        channel->irq_count = 0;
                        channel->irq_mod_score = 0;
                }

                efx_filter_rfs_expire(channel);

                /* There is no race here; although napi_disable() will
                 * only wait for napi_complete(), this isn't a problem
                 * since efx_channel_processed() will have no effect if
                 * interrupts have already been disabled.
                 */
                napi_complete(napi);
                efx_channel_processed(channel);
        }

        return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally re-enable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test. It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;

        BUG_ON(channel->channel >= efx->n_channels);
        BUG_ON(!channel->enabled);
        BUG_ON(!efx->loopback_selftest);

        /* Disable interrupts and wait for ISRs to complete */
        efx_nic_disable_interrupts(efx);
        if (efx->legacy_irq) {
                synchronize_irq(efx->legacy_irq);
                efx->legacy_irq_enabled = false;
        }
        if (channel->irq)
                synchronize_irq(channel->irq);

        /* Wait for any NAPI processing to complete */
        napi_disable(&channel->napi_str);

        /* Poll the channel */
        efx_process_channel(channel, channel->eventq_mask + 1);

        /* Ack the eventq. This may cause an interrupt to be generated
         * when interrupts are re-enabled */
        efx_channel_processed(channel);

        napi_enable(&channel->napi_str);
        if (efx->legacy_irq)
                efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        unsigned long entries;

        netif_dbg(efx, probe, efx->net_dev,
                  "chan %d create event queue\n", channel->channel);

        /* Build an event queue with room for one event per tx and rx buffer,
         * plus some extra for link state events and MCDI completions. */
        entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
        channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
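
        /* For example (assuming EFX_DEFAULT_DMAQ_SIZE is 1024, so both
         * rings default to 1024 entries): roundup_pow_of_two(1024 +
         * 1024 + 128) = 4096 events, giving an eventq_mask of 4095.
         */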

        return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
        netif_dbg(channel->efx, drv, channel->efx->net_dev,
                  "chan %d init event queue\n", channel->channel);

        channel->eventq_read_ptr = 0;

        efx_nic_init_eventq(channel);
}

/* Enable event queue processing and NAPI */
static void efx_start_eventq(struct efx_channel *channel)
{
        netif_dbg(channel->efx, ifup, channel->efx->net_dev,
                  "chan %d start event queue\n", channel->channel);

        /* The interrupt handler for this channel may set work_pending
         * as soon as we enable it. Make sure it's cleared before
         * then. Similarly, make sure it sees the enabled flag set.
         */
        channel->work_pending = false;
        channel->enabled = true;
        smp_wmb();

        napi_enable(&channel->napi_str);
        efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
static void efx_stop_eventq(struct efx_channel *channel)
{
        if (!channel->enabled)
                return;

        napi_disable(&channel->napi_str);
        channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
        netif_dbg(channel->efx, drv, channel->efx->net_dev,
                  "chan %d fini event queue\n", channel->channel);

        efx_nic_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
        netif_dbg(channel->efx, drv, channel->efx->net_dev,
                  "chan %d remove event queue\n", channel->channel);

        efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
        struct efx_channel *channel;
        struct efx_rx_queue *rx_queue;
        struct efx_tx_queue *tx_queue;
        int j;

        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
        if (!channel)
                return NULL;

        channel->efx = efx;
        channel->channel = i;
        channel->type = &efx_default_channel_type;

        for (j = 0; j < EFX_TXQ_TYPES; j++) {
                tx_queue = &channel->tx_queue[j];
                tx_queue->efx = efx;
                tx_queue->queue = i * EFX_TXQ_TYPES + j;
                tx_queue->channel = channel;
        }

        rx_queue = &channel->rx_queue;
        rx_queue->efx = efx;
        setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
                    (unsigned long)rx_queue);

        return channel;
}
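
/* Queue numbering sketch (an EFX_TXQ_TYPES of 4 is assumed here purely
 * for illustration): each channel owns one TX queue per type, so via
 * i * EFX_TXQ_TYPES + j above channel 2 would own TX queues 8..11.
 */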

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
        struct efx_channel *channel;
        struct efx_rx_queue *rx_queue;
        struct efx_tx_queue *tx_queue;
        int j;

        channel = kmalloc(sizeof(*channel), GFP_KERNEL);
        if (!channel)
                return NULL;

        *channel = *old_channel;

        channel->napi_dev = NULL;
        memset(&channel->eventq, 0, sizeof(channel->eventq));

        for (j = 0; j < EFX_TXQ_TYPES; j++) {
                tx_queue = &channel->tx_queue[j];
                if (tx_queue->channel)
                        tx_queue->channel = channel;
                tx_queue->buffer = NULL;
                memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
        }

        rx_queue = &channel->rx_queue;
        rx_queue->buffer = NULL;
        memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
        setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
                    (unsigned long)rx_queue);

        return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        int rc;

        netif_dbg(channel->efx, probe, channel->efx->net_dev,
                  "creating channel %d\n", channel->channel);

        rc = channel->type->pre_probe(channel);
        if (rc)
                goto fail;

        rc = efx_probe_eventq(channel);
        if (rc)
                goto fail;

        efx_for_each_channel_tx_queue(tx_queue, channel) {
                rc = efx_probe_tx_queue(tx_queue);
                if (rc)
                        goto fail;
        }

        efx_for_each_channel_rx_queue(rx_queue, channel) {
                rc = efx_probe_rx_queue(rx_queue);
                if (rc)
                        goto fail;
        }

        channel->n_rx_frm_trunc = 0;

        return 0;

fail:
        efx_remove_channel(channel);
        return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
        struct efx_nic *efx = channel->efx;
        const char *type;
        int number;

        number = channel->channel;
        if (efx->tx_channel_offset == 0) {
                type = "";
        } else if (channel->channel < efx->tx_channel_offset) {
                type = "-rx";
        } else {
                type = "-tx";
                number -= efx->tx_channel_offset;
        }
        snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void efx_set_channel_names(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                channel->type->get_name(channel,
                                        efx->channel_name[channel->channel],
                                        sizeof(efx->channel_name[0]));
}

static int efx_probe_channels(struct efx_nic *efx)
{
        struct efx_channel *channel;
        int rc;

        /* Restart special buffer allocation */
        efx->next_buffer_table = 0;

        /* Probe channels in reverse, so that any 'extra' channels
         * use the start of the buffer table. This allows the traffic
         * channels to be resized without moving them or wasting the
         * entries before them.
         */
        efx_for_each_channel_rev(channel, efx) {
                rc = efx_probe_channel(channel);
                if (rc) {
                        netif_err(efx, probe, efx->net_dev,
                                  "failed to create channel %d\n",
                                  channel->channel);
                        goto fail;
                }
        }
        efx_set_channel_names(efx);

        return 0;

fail:
        efx_remove_channels(efx);
        return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
        bool old_rx_scatter = efx->rx_scatter;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;
        size_t rx_buf_len;

        /* Calculate the rx buffer allocation parameters required to
         * support the current MTU, including padding for header
         * alignment and overruns.
         */
        efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
                           EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
                           efx->type->rx_buffer_padding);
        rx_buf_len = (sizeof(struct efx_rx_page_state) +
                      NET_IP_ALIGN + efx->rx_dma_len);
        if (rx_buf_len <= PAGE_SIZE) {
                efx->rx_scatter = false;
                efx->rx_buffer_order = 0;
        } else if (efx->type->can_rx_scatter) {
                BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
                BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
                             2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
                                       EFX_RX_BUF_ALIGNMENT) >
                             PAGE_SIZE);
                efx->rx_scatter = true;
                efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
                efx->rx_buffer_order = 0;
        } else {
                efx->rx_scatter = false;
                efx->rx_buffer_order = get_order(rx_buf_len);
        }
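
        /* Sketch of the common case (assuming a 4KiB PAGE_SIZE): with a
         * 1500-byte MTU the whole buffer (page state + NET_IP_ALIGN +
         * DMA length) fits in one page, so scatter stays off and
         * order-0 pages are used; only a jumbo MTU on non-scatter
         * hardware forces a higher allocation order via get_order().
         */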

        efx_rx_config_page_split(efx);
        if (efx->rx_buffer_order)
                netif_dbg(efx, drv, efx->net_dev,
                          "RX buf len=%u; page order=%u batch=%u\n",
                          efx->rx_dma_len, efx->rx_buffer_order,
                          efx->rx_pages_per_batch);
        else
                netif_dbg(efx, drv, efx->net_dev,
                          "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
                          efx->rx_dma_len, efx->rx_page_buf_step,
                          efx->rx_bufs_per_page, efx->rx_pages_per_batch);

        /* RX filters also have scatter-enabled flags */
        if (efx->rx_scatter != old_rx_scatter)
                efx_filter_update_rx_scatter(efx);

        /* We must keep at least one descriptor in a TX ring empty.
         * We could avoid this when the queue size does not exactly
         * match the hardware ring size, but it's not that important.
         * Therefore we stop the queue when one more skb might fill
         * the ring completely. We wake it when half way back to
         * empty.
         */
        efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
        efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

        /* Initialise the channels */
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_init_tx_queue(tx_queue);

                efx_for_each_channel_rx_queue(rx_queue, channel) {
                        efx_init_rx_queue(rx_queue);
                        efx_nic_generate_fill_event(rx_queue);
                }

                WARN_ON(channel->rx_pkt_n_frags);
        }

        if (netif_device_present(efx->net_dev))
                netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        struct pci_dev *dev = efx->pci_dev;
        int rc;

        EFX_ASSERT_RESET_SERIALISED(efx);
        BUG_ON(efx->port_enabled);

        /* Only perform flush if DMA is enabled */
        if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
                rc = efx_nic_flush_queues(efx);

                if (rc && EFX_WORKAROUND_7803(efx)) {
                        /* Schedule a reset to recover from the flush failure.
                         * The descriptor caches reference memory we're about
                         * to free, but falcon_reconfigure_mac_wrapper() won't
                         * reconnect the MACs because of the pending reset. */
                        netif_err(efx, drv, efx->net_dev,
                                  "Resetting to recover from flush failure\n");
                        efx_schedule_reset(efx, RESET_TYPE_ALL);
                } else if (rc) {
                        netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
                } else {
                        netif_dbg(efx, drv, efx->net_dev,
                                  "successfully flushed all queues\n");
                }
        }

        efx_for_each_channel(channel, efx) {
                /* RX packet processing is pipelined, so wait for the
                 * NAPI handler to complete. At least event queue 0
                 * might be kept active by non-data events, so don't
                 * use napi_synchronize() but actually disable NAPI
                 * temporarily.
                 */
                if (efx_channel_has_rx_queue(channel)) {
                        efx_stop_eventq(channel);
                        efx_start_eventq(channel);
                }

                efx_for_each_channel_rx_queue(rx_queue, channel)
                        efx_fini_rx_queue(rx_queue);
                efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                        efx_fini_tx_queue(tx_queue);
        }
}

static void efx_remove_channel(struct efx_channel *channel)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        netif_dbg(channel->efx, drv, channel->efx->net_dev,
                  "destroy chan %d\n", channel->channel);

        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_remove_rx_queue(rx_queue);
        efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);
        channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
        struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
        u32 old_rxq_entries, old_txq_entries;
        unsigned i, next_buffer_table = 0;
        int rc;

        rc = efx_check_disabled(efx);
        if (rc)
                return rc;

        /* Not all channels should be reallocated. We must avoid
         * reallocating their buffer table entries.
         */
        efx_for_each_channel(channel, efx) {
                struct efx_rx_queue *rx_queue;
                struct efx_tx_queue *tx_queue;

                if (channel->type->copy)
                        continue;
                next_buffer_table = max(next_buffer_table,
                                        channel->eventq.index +
                                        channel->eventq.entries);
                efx_for_each_channel_rx_queue(rx_queue, channel)
                        next_buffer_table = max(next_buffer_table,
                                                rx_queue->rxd.index +
                                                rx_queue->rxd.entries);
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        next_buffer_table = max(next_buffer_table,
                                                tx_queue->txd.index +
                                                tx_queue->txd.entries);
        }

        efx_device_detach_sync(efx);
        efx_stop_all(efx);
        efx_stop_interrupts(efx, true);

        /* Clone channels (where possible) */
        memset(other_channel, 0, sizeof(other_channel));
        for (i = 0; i < efx->n_channels; i++) {
                channel = efx->channel[i];
                if (channel->type->copy)
                        channel = channel->type->copy(channel);
                if (!channel) {
                        rc = -ENOMEM;
                        goto out;
                }
                other_channel[i] = channel;
        }

        /* Swap entry counts and channel pointers */
        old_rxq_entries = efx->rxq_entries;
        old_txq_entries = efx->txq_entries;
        efx->rxq_entries = rxq_entries;
        efx->txq_entries = txq_entries;
        for (i = 0; i < efx->n_channels; i++) {
                channel = efx->channel[i];
                efx->channel[i] = other_channel[i];
                other_channel[i] = channel;
        }

        /* Restart buffer table allocation */
        efx->next_buffer_table = next_buffer_table;

        for (i = 0; i < efx->n_channels; i++) {
                channel = efx->channel[i];
                if (!channel->type->copy)
                        continue;
                rc = efx_probe_channel(channel);
                if (rc)
                        goto rollback;
                efx_init_napi_channel(efx->channel[i]);
        }

out:
        /* Destroy unused channel structures */
        for (i = 0; i < efx->n_channels; i++) {
                channel = other_channel[i];
                if (channel && channel->type->copy) {
                        efx_fini_napi_channel(channel);
                        efx_remove_channel(channel);
                        kfree(channel);
                }
        }

        efx_start_interrupts(efx, true);
        efx_start_all(efx);
        netif_device_attach(efx->net_dev);
        return rc;

rollback:
        /* Swap back */
        efx->rxq_entries = old_rxq_entries;
        efx->txq_entries = old_txq_entries;
        for (i = 0; i < efx->n_channels; i++) {
                channel = efx->channel[i];
                efx->channel[i] = other_channel[i];
                other_channel[i] = channel;
        }
        goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
        mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct efx_channel_type efx_default_channel_type = {
        .pre_probe              = efx_channel_dummy_op_int,
        .post_remove            = efx_channel_dummy_op_void,
        .get_name               = efx_get_channel_name,
        .copy                   = efx_copy_channel,
        .keep_eventq            = false,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
        return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status. The carrier state in turn
 * determines whether the networking core keeps the port's TX queues
 * stopped.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
        struct efx_link_state *link_state = &efx->link_state;

        /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
         * that no events are triggered between unregister_netdev() and the
         * driver unloading. A more general condition is that NETDEV_CHANGE
         * can only be generated between NETDEV_UP and NETDEV_DOWN */
        if (!netif_running(efx->net_dev))
                return;

        if (link_state->up != netif_carrier_ok(efx->net_dev)) {
                efx->n_link_state_changes++;

                if (link_state->up)
                        netif_carrier_on(efx->net_dev);
                else
                        netif_carrier_off(efx->net_dev);
        }

        /* Status message for kernel log */
        if (link_state->up)
                netif_info(efx, link, efx->net_dev,
                           "link up at %uMbps %s-duplex (MTU %d)%s\n",
                           link_state->speed, link_state->fd ? "full" : "half",
                           efx->net_dev->mtu,
                           (efx->promiscuous ? " [PROMISC]" : ""));
        else
                netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
        efx->link_advertising = advertising;
        if (advertising) {
                if (advertising & ADVERTISED_Pause)
                        efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
                else
                        efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
                if (advertising & ADVERTISED_Asym_Pause)
                        efx->wanted_fc ^= EFX_FC_TX;
        }
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
        efx->wanted_fc = wanted_fc;
        if (efx->link_advertising) {
                if (wanted_fc & EFX_FC_RX)
                        efx->link_advertising |= (ADVERTISED_Pause |
                                                  ADVERTISED_Asym_Pause);
                else
                        efx->link_advertising &= ~(ADVERTISED_Pause |
                                                   ADVERTISED_Asym_Pause);
                if (wanted_fc & EFX_FC_TX)
                        efx->link_advertising ^= ADVERTISED_Asym_Pause;
        }
}
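
/* The OR/XOR pairs above implement the IEEE 802.3 pause advertising
 * encoding.  As a quick truth table for efx_link_set_wanted_fc():
 * RX+TX wanted => Pause only; RX only => Pause | Asym_Pause;
 * TX only => Asym_Pause alone; neither => nothing advertised.
 */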

static void efx_fini_port(struct efx_nic *efx);

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
        enum efx_phy_mode phy_mode;
        int rc;

        WARN_ON(!mutex_is_locked(&efx->mac_lock));

        /* Serialise the promiscuous flag with efx_set_rx_mode. */
        netif_addr_lock_bh(efx->net_dev);
        netif_addr_unlock_bh(efx->net_dev);

        /* Disable PHY transmit in mac level loopbacks */
        phy_mode = efx->phy_mode;
        if (LOOPBACK_INTERNAL(efx))
                efx->phy_mode |= PHY_MODE_TX_DISABLED;
        else
                efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

        rc = efx->type->reconfigure_port(efx);

        if (rc)
                efx->phy_mode = phy_mode;

        return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
        int rc;

        EFX_ASSERT_RESET_SERIALISED(efx);

        mutex_lock(&efx->mac_lock);
        rc = __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);

        return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

        mutex_lock(&efx->mac_lock);
        if (efx->port_enabled)
                efx->type->reconfigure_mac(efx);
        mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "create port\n");

        if (phy_flash_cfg)
                efx->phy_mode = PHY_MODE_SPECIAL;

        /* Connect up MAC/PHY operations table */
        rc = efx->type->probe_port(efx);
        if (rc)
                return rc;

        /* Initialise MAC address to permanent address */
        memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);

        return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, drv, efx->net_dev, "init port\n");

        mutex_lock(&efx->mac_lock);

        rc = efx->phy_op->init(efx);
        if (rc)
                goto fail1;

        efx->port_initialized = true;

        /* Reconfigure the MAC before creating dma queues (required for
         * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
        efx->type->reconfigure_mac(efx);

        /* Ensure the PHY advertises the correct flow control settings */
        rc = efx->phy_op->reconfigure(efx);
        if (rc)
                goto fail2;

        mutex_unlock(&efx->mac_lock);
        return 0;

fail2:
        efx->phy_op->fini(efx);
fail1:
        mutex_unlock(&efx->mac_lock);
        return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
        netif_dbg(efx, ifup, efx->net_dev, "start port\n");
        BUG_ON(efx->port_enabled);

        mutex_lock(&efx->mac_lock);
        efx->port_enabled = true;

        /* efx_mac_work() might have been scheduled after efx_stop_port(),
         * and then cancelled by efx_flush_all() */
        efx->type->reconfigure_mac(efx);

        mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
        netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

        mutex_lock(&efx->mac_lock);
        efx->port_enabled = false;
        mutex_unlock(&efx->mac_lock);

        /* Serialise against efx_set_multicast_list() */
        netif_addr_lock_bh(efx->net_dev);
        netif_addr_unlock_bh(efx->net_dev);
}

static void efx_fini_port(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

        if (!efx->port_initialized)
                return;

        efx->phy_op->fini(efx);
        efx->port_initialized = false;

        efx->link_state.up = false;
        efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

        efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
        struct pci_dev *pci_dev = efx->pci_dev;
        dma_addr_t dma_mask = efx->type->max_dma_mask;
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

        rc = pci_enable_device(pci_dev);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to enable PCI device\n");
                goto fail1;
        }

        pci_set_master(pci_dev);

        /* Set the PCI DMA mask. Try all possibilities from our
         * genuine mask down to 32 bits, because some architectures
         * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
         * masks even though they reject 46 bit masks.
         */
        while (dma_mask > 0x7fffffffUL) {
                if (dma_supported(&pci_dev->dev, dma_mask)) {
                        rc = dma_set_mask(&pci_dev->dev, dma_mask);
                        if (rc == 0)
                                break;
                }
                dma_mask >>= 1;
        }
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "could not find a suitable DMA mask\n");
                goto fail2;
        }
        netif_dbg(efx, probe, efx->net_dev,
                  "using DMA mask %llx\n", (unsigned long long) dma_mask);
        rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
        if (rc) {
                /* dma_set_coherent_mask() is not *allowed* to
                 * fail with a mask that dma_set_mask() accepted,
                 * but just in case...
                 */
                netif_err(efx, probe, efx->net_dev,
                          "failed to set consistent DMA mask\n");
                goto fail2;
        }

        efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
        rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "request for memory BAR failed\n");
                rc = -EIO;
                goto fail3;
        }
        efx->membase = ioremap_nocache(efx->membase_phys,
                                       efx->type->mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not map memory BAR at %llx+%x\n",
                          (unsigned long long)efx->membase_phys,
                          efx->type->mem_map_size);
                rc = -ENOMEM;
                goto fail4;
        }
        netif_dbg(efx, probe, efx->net_dev,
                  "memory BAR at %llx+%x (virtual %p)\n",
                  (unsigned long long)efx->membase_phys,
                  efx->type->mem_map_size, efx->membase);

        return 0;

fail4:
        pci_release_region(efx->pci_dev, EFX_MEM_BAR);
fail3:
        efx->membase_phys = 0;
fail2:
        pci_disable_device(efx->pci_dev);
fail1:
        return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

        if (efx->membase) {
                iounmap(efx->membase);
                efx->membase = NULL;
        }

        if (efx->membase_phys) {
                pci_release_region(efx->pci_dev, EFX_MEM_BAR);
                efx->membase_phys = 0;
        }

        pci_disable_device(efx->pci_dev);
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
        cpumask_var_t thread_mask;
        unsigned int count;
        int cpu;

        if (rss_cpus) {
                count = rss_cpus;
        } else {
                if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
                        netif_warn(efx, probe, efx->net_dev,
                                   "RSS disabled due to allocation failure\n");
                        return 1;
                }

                count = 0;
                for_each_online_cpu(cpu) {
                        if (!cpumask_test_cpu(cpu, thread_mask)) {
                                ++count;
                                cpumask_or(thread_mask, thread_mask,
                                           topology_thread_cpumask(cpu));
                        }
                }

                free_cpumask_var(thread_mask);
        }

        /* If RSS is requested for the PF *and* VFs then we can't write RSS
         * table entries that are inaccessible to VFs
         */
        if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
            count > efx_vf_size(efx)) {
                netif_warn(efx, probe, efx->net_dev,
                           "Reducing number of RSS channels from %u to %u for "
                           "VF support. Increase vf-msix-limit to use more "
                           "channels on the PF.\n",
                           count, efx_vf_size(efx));
                count = efx_vf_size(efx);
        }

        return count;
}
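
/* Example: on a 4-core/8-thread CPU, the loop above counts the first
 * online thread of each core and masks out its SMT siblings, so the
 * default RSS spread comes out as 4 channels rather than 8.
 */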

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
        unsigned int max_channels =
                min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
        unsigned int extra_channels = 0;
        unsigned int i, j;
        int rc;

        for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
                if (efx->extra_channel_type[i])
                        ++extra_channels;

        if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
                struct msix_entry xentries[EFX_MAX_CHANNELS];
                unsigned int n_channels;

                n_channels = efx_wanted_parallelism(efx);
                if (separate_tx_channels)
                        n_channels *= 2;
                n_channels += extra_channels;
                n_channels = min(n_channels, max_channels);

                for (i = 0; i < n_channels; i++)
                        xentries[i].entry = i;
                rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
                if (rc > 0) {
                        netif_err(efx, drv, efx->net_dev,
                                  "WARNING: Insufficient MSI-X vectors"
                                  " available (%d < %u).\n", rc, n_channels);
                        netif_err(efx, drv, efx->net_dev,
                                  "WARNING: Performance may be reduced.\n");
                        EFX_BUG_ON_PARANOID(rc >= n_channels);
                        n_channels = rc;
                        rc = pci_enable_msix(efx->pci_dev, xentries,
                                             n_channels);
                }

                if (rc == 0) {
                        efx->n_channels = n_channels;
                        if (n_channels > extra_channels)
                                n_channels -= extra_channels;
                        if (separate_tx_channels) {
                                efx->n_tx_channels = max(n_channels / 2, 1U);
                                efx->n_rx_channels = max(n_channels -
                                                         efx->n_tx_channels,
                                                         1U);
                        } else {
                                efx->n_tx_channels = n_channels;
                                efx->n_rx_channels = n_channels;
                        }
                        for (i = 0; i < efx->n_channels; i++)
                                efx_get_channel(efx, i)->irq =
                                        xentries[i].vector;
                } else {
                        /* Fall back to single channel MSI */
                        efx->interrupt_mode = EFX_INT_MODE_MSI;
                        netif_err(efx, drv, efx->net_dev,
                                  "could not enable MSI-X\n");
                }
        }

        /* Try single interrupt MSI */
        if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
                efx->n_channels = 1;
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
                rc = pci_enable_msi(efx->pci_dev);
                if (rc == 0) {
                        efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
                } else {
                        netif_err(efx, drv, efx->net_dev,
                                  "could not enable MSI\n");
                        efx->interrupt_mode = EFX_INT_MODE_LEGACY;
                }
        }

        /* Assume legacy interrupts */
        if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
                efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
                efx->legacy_irq = efx->pci_dev->irq;
        }

        /* Assign extra channels if possible */
        j = efx->n_channels;
        for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
                if (!efx->extra_channel_type[i])
                        continue;
                if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
                    efx->n_channels <= extra_channels) {
                        efx->extra_channel_type[i]->handle_no_channel(efx);
                } else {
                        --j;
                        efx_get_channel(efx, j)->type =
                                efx->extra_channel_type[i];
                }
        }

        /* RSS might be usable on VFs even if it is disabled on the PF */
        efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
                           efx->n_rx_channels : efx_vf_size(efx));

        return 0;
}

/* Enable interrupts, then probe and start the event queues */
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
        struct efx_channel *channel;

        BUG_ON(efx->state == STATE_DISABLED);

        if (efx->eeh_disabled_legacy_irq) {
                enable_irq(efx->legacy_irq);
                efx->eeh_disabled_legacy_irq = false;
        }
        if (efx->legacy_irq)
                efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);

        efx_for_each_channel(channel, efx) {
                if (!channel->type->keep_eventq || !may_keep_eventq)
                        efx_init_eventq(channel);
                efx_start_eventq(channel);
        }

        efx_mcdi_mode_event(efx);
}

static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
        struct efx_channel *channel;

        if (efx->state == STATE_DISABLED)
                return;

        efx_mcdi_mode_poll(efx);

        efx_nic_disable_interrupts(efx);
        if (efx->legacy_irq) {
                synchronize_irq(efx->legacy_irq);
                efx->legacy_irq_enabled = false;
        }

        efx_for_each_channel(channel, efx) {
                if (channel->irq)
                        synchronize_irq(channel->irq);

                efx_stop_eventq(channel);
                if (!channel->type->keep_eventq || !may_keep_eventq)
                        efx_fini_eventq(channel);
        }
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
        struct efx_channel *channel;

        /* Remove MSI/MSI-X interrupts */
        efx_for_each_channel(channel, efx)
                channel->irq = 0;
        pci_disable_msi(efx->pci_dev);
        pci_disable_msix(efx->pci_dev);

        /* Remove legacy interrupt */
        efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;

        efx->tx_channel_offset =
                separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

        /* We need to mark which channels really have RX and TX
         * queues, and adjust the TX queue numbers if we have separate
         * RX-only and TX-only channels.
         */
        efx_for_each_channel(channel, efx) {
                if (channel->channel < efx->n_rx_channels)
                        channel->rx_queue.core_index = channel->channel;
                else
                        channel->rx_queue.core_index = -1;

                efx_for_each_channel_tx_queue(tx_queue, channel)
                        tx_queue->queue -= (efx->tx_channel_offset *
                                            EFX_TXQ_TYPES);
        }
}
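
/* Worked example (assuming separate_tx_channels with 8 channels, 4 of
 * them TX): tx_channel_offset is 4, channels 0-3 keep their RX queues,
 * and the TX queue numbers on channels 4-7 are rebased so that channel
 * 4 starts at TX queue 0.
 */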

static int efx_probe_nic(struct efx_nic *efx)
{
        size_t i;
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

        /* Carry out hardware-type specific initialisation */
        rc = efx->type->probe(efx);
        if (rc)
                return rc;

        /* Determine the number of channels and queues by trying to hook
         * in MSI-X interrupts. */
        rc = efx_probe_interrupts(efx);
        if (rc)
                goto fail;

        efx->type->dimension_resources(efx);

        if (efx->n_channels > 1)
                get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
                efx->rx_indir_table[i] =
                        ethtool_rxfh_indir_default(i, efx->rss_spread);

        efx_set_channels(efx);
        netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
        netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

        /* Initialise the interrupt moderation settings */
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
                                true);

        return 0;

fail:
        efx->type->remove(efx);
        return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

        efx_remove_interrupts(efx);
        efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
        int rc;

        rc = efx_probe_nic(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
                goto fail1;
        }

        rc = efx_probe_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to create port\n");
                goto fail2;
        }

        BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
        if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
                rc = -EINVAL;
                goto fail3;
        }
        efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

        rc = efx_probe_filters(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to create filter tables\n");
                goto fail3;
        }

        rc = efx_probe_channels(efx);
        if (rc)
                goto fail4;

        return 0;

fail4:
        efx_remove_filters(efx);
fail3:
        efx_remove_port(efx);
fail2:
        efx_remove_nic(efx);
fail1:
        return rc;
}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void efx_start_all(struct efx_nic *efx)
{
        EFX_ASSERT_RESET_SERIALISED(efx);
        BUG_ON(efx->state == STATE_DISABLED);

        /* Check that it is appropriate to restart the interface. All
         * of these flags are safe to read under just the rtnl lock */
        if (efx->port_enabled || !netif_running(efx->net_dev))
                return;

        efx_start_port(efx);
        efx_start_datapath(efx);

        /* Start the hardware monitor if there is one */
        if (efx->type->monitor != NULL)
                queue_delayed_work(efx->workqueue, &efx->monitor_work,
                                   efx_monitor_interval);

        /* If link state detection is normally event-driven, we have
         * to poll now because we could have missed a change
         */
        if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
                mutex_lock(&efx->mac_lock);
                if (efx->phy_op->poll(efx))
                        efx_link_status_changed(efx);
                mutex_unlock(&efx->mac_lock);
        }

        efx->type->start_stats(efx);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
        /* Make sure the hardware monitor and event self-test are stopped */
        cancel_delayed_work_sync(&efx->monitor_work);
        efx_selftest_async_cancel(efx);
        /* Stop scheduled port reconfigurations */
        cancel_work_sync(&efx->mac_work);
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
static void efx_stop_all(struct efx_nic *efx)
{
        EFX_ASSERT_RESET_SERIALISED(efx);

        /* port_enabled can be read safely under the rtnl lock */
        if (!efx->port_enabled)
                return;

        efx->type->stop_stats(efx);
        efx_stop_port(efx);

        /* Flush efx_mac_work(), refill_workqueue, monitor_work */
        efx_flush_all(efx);

        /* Stop the kernel transmit interface. This is only valid if
         * the device is stopped or detached; otherwise the watchdog
         * may fire immediately.
         */
        WARN_ON(netif_running(efx->net_dev) &&
                netif_device_present(efx->net_dev));
        netif_tx_disable(efx->net_dev);

        efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
        efx_remove_channels(efx);
        efx_remove_filters(efx);
        efx_remove_port(efx);
        efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
        if (usecs == 0)
                return 0;
        if (usecs * 1000 < quantum_ns)
                return 1; /* never round down to 0 */
        return usecs * 1000 / quantum_ns;
}
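
/* Example conversion (a timer quantum of 5000ns is assumed purely for
 * illustration; the real value is hardware-specific): 60 usec becomes
 * 60000 / 5000 = 12 ticks, while 3 usec rounds up to 1 tick rather
 * than down to 0.  efx_get_irq_moderation() below rounds up on the
 * way back, so a setting survives a write/read round trip.
 */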

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                            unsigned int rx_usecs, bool rx_adaptive,
                            bool rx_may_override_tx)
{
        struct efx_channel *channel;
        unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
                                                efx->timer_quantum_ns,
                                                1000);
        unsigned int tx_ticks;
        unsigned int rx_ticks;

        EFX_ASSERT_RESET_SERIALISED(efx);

        if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
                return -EINVAL;

        tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
        rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

        if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
            !rx_may_override_tx) {
                netif_err(efx, drv, efx->net_dev, "Channels are shared. "
                          "RX and TX IRQ moderation must be equal\n");
                return -EINVAL;
        }

        efx->irq_rx_adaptive = rx_adaptive;
        efx->irq_rx_moderation = rx_ticks;
        efx_for_each_channel(channel, efx) {
                if (efx_channel_has_rx_queue(channel))
                        channel->irq_moderation = rx_ticks;
                else if (efx_channel_has_tx_queues(channel))
                        channel->irq_moderation = tx_ticks;
        }

        return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive)
{
        /* We must round up when converting ticks to microseconds
         * because we round down when converting the other way.
         */

        *rx_adaptive = efx->irq_rx_adaptive;
        *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
                                 efx->timer_quantum_ns,
                                 1000);

        /* If channels are shared between RX and TX, so is IRQ
         * moderation. Otherwise, IRQ moderation is the same for all
         * TX channels and is not adaptive.
         */
        if (efx->tx_channel_offset == 0)
                *tx_usecs = *rx_usecs;
        else
                *tx_usecs = DIV_ROUND_UP(
                        efx->channel[efx->tx_channel_offset]->irq_moderation *
                        efx->timer_quantum_ns,
                        1000);
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic,
                                           monitor_work.work);

        netif_vdbg(efx, timer, efx->net_dev,
                   "hardware monitor executing on CPU %d\n",
                   raw_smp_processor_id());
        BUG_ON(efx->type->monitor == NULL);

        /* If the mac_lock is already held then it is likely a port
         * reconfiguration is already in place, which will likely do
         * most of the work of monitor() anyway. */
        if (mutex_trylock(&efx->mac_lock)) {
                if (efx->port_enabled)
                        efx->type->monitor(efx);
                mutex_unlock(&efx->mac_lock);
        }

        queue_delayed_work(efx->workqueue, &efx->monitor_work,
                           efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct mii_ioctl_data *data = if_mii(ifr);

        if (cmd == SIOCSHWTSTAMP)
                return efx_ptp_ioctl(efx, ifr, cmd);

        /* Convert phy_id from older PRTAD/DEVAD format */
        if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
            (data->phy_id & 0xfc00) == 0x0400)
                data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

        return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi_channel(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;

        channel->napi_dev = efx->net_dev;
        netif_napi_add(channel->napi_dev, &channel->napi_str,
                       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
        if (channel->napi_dev)
                netif_napi_del(&channel->napi_str);
        channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_schedule_channel(channel);
}

#endif

1866 /**************************************************************************
1867 *
1868 * Kernel net device interface
1869 *
1870 *************************************************************************/
1871
1872 /* Context: process, rtnl_lock() held. */
1873 static int efx_net_open(struct net_device *net_dev)
1874 {
1875 struct efx_nic *efx = netdev_priv(net_dev);
1876 int rc;
1877
1878 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
1879 raw_smp_processor_id());
1880
1881 rc = efx_check_disabled(efx);
1882 if (rc)
1883 return rc;
1884 if (efx->phy_mode & PHY_MODE_SPECIAL)
1885 return -EBUSY;
1886 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
1887 return -EIO;
1888
1889 /* Notify the kernel of the link state polled during driver load,
1890 * before the monitor starts running */
1891 efx_link_status_changed(efx);
1892
1893 efx_start_all(efx);
1894 efx_selftest_async_start(efx);
1895 return 0;
1896 }
1897
1898 /* Context: process, rtnl_lock() held.
1899 * Note that the kernel will ignore our return code; this method
1900 * should really be void.
1901 */
1902 static int efx_net_stop(struct net_device *net_dev)
1903 {
1904 struct efx_nic *efx = netdev_priv(net_dev);
1905
1906 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
1907 raw_smp_processor_id());
1908
1909 /* Stop the device and flush all the channels */
1910 efx_stop_all(efx);
1911
1912 return 0;
1913 }
1914
1915 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
1916 static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
1917 struct rtnl_link_stats64 *stats)
1918 {
1919 struct efx_nic *efx = netdev_priv(net_dev);
1920 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1921
1922 spin_lock_bh(&efx->stats_lock);
1923
1924 efx->type->update_stats(efx);
1925
1926 stats->rx_packets = mac_stats->rx_packets;
1927 stats->tx_packets = mac_stats->tx_packets;
1928 stats->rx_bytes = mac_stats->rx_bytes;
1929 stats->tx_bytes = mac_stats->tx_bytes;
1930 stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
1931 stats->multicast = mac_stats->rx_multicast;
1932 stats->collisions = mac_stats->tx_collision;
1933 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1934 mac_stats->rx_length_error);
1935 stats->rx_crc_errors = mac_stats->rx_bad;
1936 stats->rx_frame_errors = mac_stats->rx_align_error;
1937 stats->rx_fifo_errors = mac_stats->rx_overflow;
1938 stats->rx_missed_errors = mac_stats->rx_missed;
1939 stats->tx_window_errors = mac_stats->tx_late_collision;
1940
1941 stats->rx_errors = (stats->rx_length_errors +
1942 stats->rx_crc_errors +
1943 stats->rx_frame_errors +
1944 mac_stats->rx_symbol_error);
1945 stats->tx_errors = (stats->tx_window_errors +
1946 mac_stats->tx_bad);
1947
1948 spin_unlock_bh(&efx->stats_lock);
1949
1950 return stats;
1951 }
1952
1953 /* Context: netif_tx_lock held, BHs disabled. */
1954 static void efx_watchdog(struct net_device *net_dev)
1955 {
1956 struct efx_nic *efx = netdev_priv(net_dev);
1957
1958 netif_err(efx, tx_err, efx->net_dev,
1959 "TX stuck with port_enabled=%d: resetting channels\n",
1960 efx->port_enabled);
1961
1962 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
1963 }
1964
1965
1966 /* Context: process, rtnl_lock() held. */
1967 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1968 {
1969 struct efx_nic *efx = netdev_priv(net_dev);
1970 int rc;
1971
1972 rc = efx_check_disabled(efx);
1973 if (rc)
1974 return rc;
1975 if (new_mtu > EFX_MAX_MTU)
1976 return -EINVAL;
1977
1978 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1979
1980 efx_device_detach_sync(efx);
1981 efx_stop_all(efx);
1982
1983 mutex_lock(&efx->mac_lock);
1984 net_dev->mtu = new_mtu;
1985 efx->type->reconfigure_mac(efx);
1986 mutex_unlock(&efx->mac_lock);
1987
1988 efx_start_all(efx);
1989 netif_device_attach(efx->net_dev);
1990 return 0;
1991 }
1992
1993 static int efx_set_mac_address(struct net_device *net_dev, void *data)
1994 {
1995 struct efx_nic *efx = netdev_priv(net_dev);
1996 struct sockaddr *addr = data;
1997 char *new_addr = addr->sa_data;
1998
1999 if (!is_valid_ether_addr(new_addr)) {
2000 netif_err(efx, drv, efx->net_dev,
2001 "invalid ethernet MAC address requested: %pM\n",
2002 new_addr);
2003 return -EADDRNOTAVAIL;
2004 }
2005
2006 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
2007 efx_sriov_mac_address_changed(efx);
2008
2009 /* Reconfigure the MAC */
2010 mutex_lock(&efx->mac_lock);
2011 efx->type->reconfigure_mac(efx);
2012 mutex_unlock(&efx->mac_lock);
2013
2014 return 0;
2015 }
2016
2017 /* Context: netif_addr_lock held, BHs disabled. */
2018 static void efx_set_rx_mode(struct net_device *net_dev)
2019 {
2020 struct efx_nic *efx = netdev_priv(net_dev);
2021 struct netdev_hw_addr *ha;
2022 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2023 u32 crc;
2024 int bit;
2025
2026 efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
2027
2028 /* Build multicast hash table */
2029 if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
2030 memset(mc_hash, 0xff, sizeof(*mc_hash));
2031 } else {
2032 memset(mc_hash, 0x00, sizeof(*mc_hash));
2033 netdev_for_each_mc_addr(ha, net_dev) {
2034 crc = ether_crc_le(ETH_ALEN, ha->addr);
2035 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2036 __set_bit_le(bit, mc_hash);
2037 }
2038
2039 /* Broadcast packets go through the multicast hash filter.
2040 * ether_crc_le() of the broadcast address is 0xbe2612ff
2041 * so we always add bit 0xff to the mask.
2042 */
2043 __set_bit_le(0xff, mc_hash);
2044 }
2045
2046 if (efx->port_enabled)
2047 queue_work(efx->workqueue, &efx->mac_work);
2048 /* Otherwise efx_start_port() will do this */
2049 }
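
/* Illustrative sketch, not part of the driver: how an address maps to
* a bucket in the hash built above. Assumes EFX_MCAST_HASH_ENTRIES is
* a power of two, so the mask keeps the low-order CRC bits; for the
* broadcast address ether_crc_le() yields 0xbe2612ff, which is why
* bit 0xff is always set above.
*/
static inline unsigned int efx_example_mcast_bucket(const u8 *addr)
{
/* Same computation as the netdev_for_each_mc_addr() loop above */
return ether_crc_le(ETH_ALEN, addr) & (EFX_MCAST_HASH_ENTRIES - 1);
}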
2050
2051 static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2052 {
2053 struct efx_nic *efx = netdev_priv(net_dev);
2054
2055 /* If disabling RX n-tuple filtering, clear existing filters */
2056 if (net_dev->features & ~data & NETIF_F_NTUPLE)
2057 efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
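
/* Note: net_dev->features holds the current flags and 'data' the
* requested set, so the test above fires exactly on an on->off
* transition of NETIF_F_NTUPLE.
*/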
2058
2059 return 0;
2060 }
2061
2062 static const struct net_device_ops efx_netdev_ops = {
2063 .ndo_open = efx_net_open,
2064 .ndo_stop = efx_net_stop,
2065 .ndo_get_stats64 = efx_net_stats,
2066 .ndo_tx_timeout = efx_watchdog,
2067 .ndo_start_xmit = efx_hard_start_xmit,
2068 .ndo_validate_addr = eth_validate_addr,
2069 .ndo_do_ioctl = efx_ioctl,
2070 .ndo_change_mtu = efx_change_mtu,
2071 .ndo_set_mac_address = efx_set_mac_address,
2072 .ndo_set_rx_mode = efx_set_rx_mode,
2073 .ndo_set_features = efx_set_features,
2074 #ifdef CONFIG_SFC_SRIOV
2075 .ndo_set_vf_mac = efx_sriov_set_vf_mac,
2076 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
2077 .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
2078 .ndo_get_vf_config = efx_sriov_get_vf_config,
2079 #endif
2080 #ifdef CONFIG_NET_POLL_CONTROLLER
2081 .ndo_poll_controller = efx_netpoll,
2082 #endif
2083 .ndo_setup_tc = efx_setup_tc,
2084 #ifdef CONFIG_RFS_ACCEL
2085 .ndo_rx_flow_steer = efx_filter_rfs,
2086 #endif
2087 };
2088
2089 static void efx_update_name(struct efx_nic *efx)
2090 {
2091 strcpy(efx->name, efx->net_dev->name);
2092 efx_mtd_rename(efx);
2093 efx_set_channel_names(efx);
2094 }
2095
2096 static int efx_netdev_event(struct notifier_block *this,
2097 unsigned long event, void *ptr)
2098 {
2099 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2100
2101 if (net_dev->netdev_ops == &efx_netdev_ops &&
2102 event == NETDEV_CHANGENAME)
2103 efx_update_name(netdev_priv(net_dev));
2104
2105 return NOTIFY_DONE;
2106 }
2107
2108 static struct notifier_block efx_netdev_notifier = {
2109 .notifier_call = efx_netdev_event,
2110 };
2111
2112 static ssize_t
2113 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2114 {
2115 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2116 return sprintf(buf, "%d\n", efx->phy_type);
2117 }
2118 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2119
2120 static int efx_register_netdev(struct efx_nic *efx)
2121 {
2122 struct net_device *net_dev = efx->net_dev;
2123 struct efx_channel *channel;
2124 int rc;
2125
2126 net_dev->watchdog_timeo = 5 * HZ;
2127 net_dev->irq = efx->pci_dev->irq;
2128 net_dev->netdev_ops = &efx_netdev_ops;
2129 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
2130 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2131
2132 rtnl_lock();
2133
2134 /* Enable resets to be scheduled and check whether any were
2135 * already requested. If so, the NIC is probably hosed so we
2136 * abort.
2137 */
2138 efx->state = STATE_READY;
2139 smp_mb(); /* ensure we change state before checking reset_pending */
2140 if (efx->reset_pending) {
2141 netif_err(efx, probe, efx->net_dev,
2142 "aborting probe due to scheduled reset\n");
2143 rc = -EIO;
2144 goto fail_locked;
2145 }
2146
2147 rc = dev_alloc_name(net_dev, net_dev->name);
2148 if (rc < 0)
2149 goto fail_locked;
2150 efx_update_name(efx);
2151
2152 /* Always start with carrier off; PHY events will detect the link */
2153 netif_carrier_off(net_dev);
2154
2155 rc = register_netdevice(net_dev);
2156 if (rc)
2157 goto fail_locked;
2158
2159 efx_for_each_channel(channel, efx) {
2160 struct efx_tx_queue *tx_queue;
2161 efx_for_each_channel_tx_queue(tx_queue, channel)
2162 efx_init_tx_queue_core_txq(tx_queue);
2163 }
2164
2165 rtnl_unlock();
2166
2167 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2168 if (rc) {
2169 netif_err(efx, drv, efx->net_dev,
2170 "failed to init net dev attributes\n");
2171 goto fail_registered;
2172 }
2173
2174 return 0;
2175
2176 fail_registered:
2177 rtnl_lock();
2178 unregister_netdevice(net_dev);
2179 fail_locked:
2180 efx->state = STATE_UNINIT;
2181 rtnl_unlock();
2182 netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2183 return rc;
2184 }
2185
2186 static void efx_unregister_netdev(struct efx_nic *efx)
2187 {
2188 struct efx_channel *channel;
2189 struct efx_tx_queue *tx_queue;
2190
2191 if (!efx->net_dev)
2192 return;
2193
2194 BUG_ON(netdev_priv(efx->net_dev) != efx);
2195
2196 /* Free up any skbs still remaining. This must happen before we
2197 * try to unregister the netdev, as running their destructors may
2198 * be needed to get the device reference count to 0. */
2199 efx_for_each_channel(channel, efx) {
2200 efx_for_each_channel_tx_queue(tx_queue, channel)
2201 efx_release_tx_buffers(tx_queue);
2202 }
2203
2204 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2205 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2206
2207 rtnl_lock();
2208 unregister_netdevice(efx->net_dev);
2209 efx->state = STATE_UNINIT;
2210 rtnl_unlock();
2211 }
2212
2213 /**************************************************************************
2214 *
2215 * Device reset and suspend
2216 *
2217 **************************************************************************/
2218
2219 /* Tears down the entire software state and most of the hardware state
2220 * before reset. */
2221 void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2222 {
2223 EFX_ASSERT_RESET_SERIALISED(efx);
2224
2225 efx_stop_all(efx);
2226 efx_stop_interrupts(efx, false);
2227
2228 mutex_lock(&efx->mac_lock);
2229 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
2230 efx->phy_op->fini(efx);
2231 efx->type->fini(efx);
2232 }
2233
2234 /* This function will always ensure that the locks acquired in
2235 * efx_reset_down() are released. A failure return code indicates
2236 * that we were unable to reinitialise the hardware, and the
2237 * driver should be disabled. If ok is false, then the rx and tx
2238 * engines are not restarted, pending a RESET_TYPE_DISABLE. */
2239 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2240 {
2241 int rc;
2242
2243 EFX_ASSERT_RESET_SERIALISED(efx);
2244
2245 rc = efx->type->init(efx);
2246 if (rc) {
2247 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2248 goto fail;
2249 }
2250
2251 if (!ok)
2252 goto fail;
2253
2254 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
2255 rc = efx->phy_op->init(efx);
2256 if (rc)
2257 goto fail;
2258 if (efx->phy_op->reconfigure(efx))
2259 netif_err(efx, drv, efx->net_dev,
2260 "could not restore PHY settings\n");
2261 }
2262
2263 efx->type->reconfigure_mac(efx);
2264
2265 efx_start_interrupts(efx, false);
2266 efx_restore_filters(efx);
2267 efx_sriov_reset(efx);
2268
2269 mutex_unlock(&efx->mac_lock);
2270
2271 efx_start_all(efx);
2272
2273 return 0;
2274
2275 fail:
2276 efx->port_initialized = false;
2277
2278 mutex_unlock(&efx->mac_lock);
2279
2280 return rc;
2281 }
2282
2283 /* Reset the NIC using the specified method. Note that the reset may
2284 * fail, in which case the card will be left in an unusable state.
2285 *
2286 * Caller must hold the rtnl_lock.
2287 */
2288 int efx_reset(struct efx_nic *efx, enum reset_type method)
2289 {
2290 int rc, rc2;
2291 bool disabled;
2292
2293 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2294 RESET_TYPE(method));
2295
2296 efx_device_detach_sync(efx);
2297 efx_reset_down(efx, method);
2298
2299 rc = efx->type->reset(efx, method);
2300 if (rc) {
2301 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2302 goto out;
2303 }
2304
2305 /* Clear flags for the scopes we covered. We assume the NIC and
2306 * driver are now quiescent so that there is no race here.
2307 */
2308 efx->reset_pending &= -(1 << (method + 1));
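/* Example: -(1 << (method + 1)) == ~((1 << (method + 1)) - 1), so for
* method == 1 the mask is ...11111100: bits 0-1 (this scope and the
* scopes it subsumes) are cleared, while any more drastic pending
* reset requests in higher bits survive for a later pass.
*/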
2309
2310 /* Reinitialise bus-mastering, which may have been turned off before
2311 * the reset was scheduled. This is still appropriate, even in the
2312 * RESET_TYPE_DISABLE case, since this driver generally assumes the hardware
2313 * can respond to requests. */
2314 pci_set_master(efx->pci_dev);
2315
2316 out:
2317 /* Leave device stopped if necessary */
2318 disabled = rc ||
2319 method == RESET_TYPE_DISABLE ||
2320 method == RESET_TYPE_RECOVER_OR_DISABLE;
2321 rc2 = efx_reset_up(efx, method, !disabled);
2322 if (rc2) {
2323 disabled = true;
2324 if (!rc)
2325 rc = rc2;
2326 }
2327
2328 if (disabled) {
2329 dev_close(efx->net_dev);
2330 netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2331 efx->state = STATE_DISABLED;
2332 } else {
2333 netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2334 netif_device_attach(efx->net_dev);
2335 }
2336 return rc;
2337 }
2338
2339 /* Try recovery mechanisms.
2340 * For now only EEH is supported.
2341 * Returns 0 if the recovery mechanisms are unsuccessful.
2342 * Returns a non-zero value otherwise.
2343 */
2344 int efx_try_recovery(struct efx_nic *efx)
2345 {
2346 #ifdef CONFIG_EEH
2347 /* A PCI error can occur and go unseen by EEH if nothing subsequently
2348 * happens on the PCI bus to reveal it. In this case the driver may fail
2349 * and schedule a 'recover or reset', leading to this recovery handler.
2350 * Manually call the EEH failure check function.
2351 */
2352 struct eeh_dev *eehdev =
2353 of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
2354
2355 if (eeh_dev_check_failure(eehdev)) {
2356 /* The EEH mechanisms will handle the error and reset the
2357 * device if necessary.
2358 */
2359 return 1;
2360 }
2361 #endif
2362 return 0;
2363 }
2364
2365 /* The worker thread exists so that code that cannot sleep can
2366 * schedule a reset for later.
2367 */
2368 static void efx_reset_work(struct work_struct *data)
2369 {
2370 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2371 unsigned long pending;
2372 enum reset_type method;
2373
2374 pending = ACCESS_ONCE(efx->reset_pending);
2375 method = fls(pending) - 1;
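/* fls() returns the 1-based index of the highest set bit, so e.g.
* pending == 0x5 (bits 0 and 2 set) gives method == 2 and the most
* drastic reset requested wins. For pending == 0, method is -1; it
* matches neither case below and the !pending check returns early.
*/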
2376
2377 if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2378 method == RESET_TYPE_RECOVER_OR_ALL) &&
2379 efx_try_recovery(efx))
2380 return;
2381
2382 if (!pending)
2383 return;
2384
2385 rtnl_lock();
2386
2387 /* We checked the state in efx_schedule_reset() but it may
2388 * have changed by now. Now that we have the RTNL lock,
2389 * it cannot change again.
2390 */
2391 if (efx->state == STATE_READY)
2392 (void)efx_reset(efx, method);
2393
2394 rtnl_unlock();
2395 }
2396
2397 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2398 {
2399 enum reset_type method;
2400
2401 if (efx->state == STATE_RECOVERY) {
2402 netif_dbg(efx, drv, efx->net_dev,
2403 "recovering: skip scheduling %s reset\n",
2404 RESET_TYPE(type));
2405 return;
2406 }
2407
2408 switch (type) {
2409 case RESET_TYPE_INVISIBLE:
2410 case RESET_TYPE_ALL:
2411 case RESET_TYPE_RECOVER_OR_ALL:
2412 case RESET_TYPE_WORLD:
2413 case RESET_TYPE_DISABLE:
2414 case RESET_TYPE_RECOVER_OR_DISABLE:
2415 method = type;
2416 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2417 RESET_TYPE(method));
2418 break;
2419 default:
2420 method = efx->type->map_reset_reason(type);
2421 netif_dbg(efx, drv, efx->net_dev,
2422 "scheduling %s reset for %s\n",
2423 RESET_TYPE(method), RESET_TYPE(type));
2424 break;
2425 }
2426
2427 set_bit(method, &efx->reset_pending);
2428 smp_mb(); /* ensure we change reset_pending before checking state */
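/* This barrier pairs with the one in efx_register_netdev(): one side
* writes reset_pending then reads state, the other writes state then
* reads reset_pending, so at least one side is guaranteed to see the
* other's write and a reset can be neither lost nor run prematurely.
*/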
2429
2430 /* If we're not READY then just leave the flags set as the cue
2431 * to abort probing or reschedule the reset later.
2432 */
2433 if (ACCESS_ONCE(efx->state) != STATE_READY)
2434 return;
2435
2436 /* efx_process_channel() will no longer read events once a
2437 * reset is scheduled, so switch back to polled MCDI completions. */
2438 efx_mcdi_mode_poll(efx);
2439
2440 queue_work(reset_workqueue, &efx->reset_work);
2441 }
2442
2443 /**************************************************************************
2444 *
2445 * List of NICs we support
2446 *
2447 **************************************************************************/
2448
2449 /* PCI device ID table */
2450 static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
2451 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2452 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
2453 .driver_data = (unsigned long) &falcon_a1_nic_type},
2454 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2455 PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
2456 .driver_data = (unsigned long) &falcon_b0_nic_type},
2457 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
2458 .driver_data = (unsigned long) &siena_a0_nic_type},
2459 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
2460 .driver_data = (unsigned long) &siena_a0_nic_type},
2461 {0} /* end of list */
2462 };
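
/* The .driver_data stored in each entry above is recovered in
* efx_pci_probe() as entry->driver_data and becomes efx->type,
* selecting the per-controller operations for the matched device.
*/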
2463
2464 /**************************************************************************
2465 *
2466 * Dummy PHY/MAC operations
2467 *
2468 * Can be used for some unimplemented operations
2469 * Needed so all function pointers are valid and do not have to be tested
2470 * before use
2471 *
2472 **************************************************************************/
2473 int efx_port_dummy_op_int(struct efx_nic *efx)
2474 {
2475 return 0;
2476 }
2477 void efx_port_dummy_op_void(struct efx_nic *efx) {}
2478
2479 static bool efx_port_dummy_op_poll(struct efx_nic *efx)
2480 {
2481 return false;
2482 }
2483
2484 static const struct efx_phy_operations efx_dummy_phy_operations = {
2485 .init = efx_port_dummy_op_int,
2486 .reconfigure = efx_port_dummy_op_int,
2487 .poll = efx_port_dummy_op_poll,
2488 .fini = efx_port_dummy_op_void,
2489 };
2490
2491 /**************************************************************************
2492 *
2493 * Data housekeeping
2494 *
2495 **************************************************************************/
2496
2497 /* This zeroes out and then fills in the invariants in a struct
2498 * efx_nic (including all sub-structures).
2499 */
2500 static int efx_init_struct(struct efx_nic *efx,
2501 struct pci_dev *pci_dev, struct net_device *net_dev)
2502 {
2503 int i;
2504
2505 /* Initialise common structures */
2506 spin_lock_init(&efx->biu_lock);
2507 #ifdef CONFIG_SFC_MTD
2508 INIT_LIST_HEAD(&efx->mtd_list);
2509 #endif
2510 INIT_WORK(&efx->reset_work, efx_reset_work);
2511 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
2512 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
2513 efx->pci_dev = pci_dev;
2514 efx->msg_enable = debug;
2515 efx->state = STATE_UNINIT;
2516 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2517
2518 efx->net_dev = net_dev;
2519 spin_lock_init(&efx->stats_lock);
2520 mutex_init(&efx->mac_lock);
2521 efx->phy_op = &efx_dummy_phy_operations;
2522 efx->mdio.dev = net_dev;
2523 INIT_WORK(&efx->mac_work, efx_mac_work);
2524 init_waitqueue_head(&efx->flush_wq);
2525
2526 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
2527 efx->channel[i] = efx_alloc_channel(efx, i, NULL);
2528 if (!efx->channel[i])
2529 goto fail;
2530 }
2531
2532 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
2533
2534 /* Higher numbered interrupt modes are less capable! */
2535 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2536 interrupt_mode);
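/* Example: with 0=>MSI-X, 1=>MSI, 2=>legacy (see the module parameter
* description at the bottom of this file), a NIC whose
* max_interrupt_mode is MSI silently degrades a user request for
* MSI-X: max(1, 0) == 1.
*/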
2537
2538 /* Would be good to use the net_dev name, but we're too early */
2539 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
2540 pci_name(pci_dev));
2541 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2542 if (!efx->workqueue)
2543 goto fail;
2544
2545 return 0;
2546
2547 fail:
2548 efx_fini_struct(efx);
2549 return -ENOMEM;
2550 }
2551
2552 static void efx_fini_struct(struct efx_nic *efx)
2553 {
2554 int i;
2555
2556 for (i = 0; i < EFX_MAX_CHANNELS; i++)
2557 kfree(efx->channel[i]);
2558
2559 if (efx->workqueue) {
2560 destroy_workqueue(efx->workqueue);
2561 efx->workqueue = NULL;
2562 }
2563 }
2564
2565 /**************************************************************************
2566 *
2567 * PCI interface
2568 *
2569 **************************************************************************/
2570
2571 /* Main body of final NIC shutdown code
2572 * This is called only at module unload (or hotplug removal).
2573 */
2574 static void efx_pci_remove_main(struct efx_nic *efx)
2575 {
2576 /* Flush reset_work. It can no longer be scheduled since we
2577 * are not READY.
2578 */
2579 BUG_ON(efx->state == STATE_READY);
2580 cancel_work_sync(&efx->reset_work);
2581
2582 efx_stop_interrupts(efx, false);
2583 efx_nic_fini_interrupt(efx);
2584 efx_fini_port(efx);
2585 efx->type->fini(efx);
2586 efx_fini_napi(efx);
2587 efx_remove_all(efx);
2588 }
2589
2590 /* Final NIC shutdown
2591 * This is called only at module unload (or hotplug removal).
2592 */
2593 static void efx_pci_remove(struct pci_dev *pci_dev)
2594 {
2595 struct efx_nic *efx;
2596
2597 efx = pci_get_drvdata(pci_dev);
2598 if (!efx)
2599 return;
2600
2601 /* Mark the NIC as being shut down, then stop the interface */
2602 rtnl_lock();
2603 dev_close(efx->net_dev);
2604 efx_stop_interrupts(efx, false);
2605 rtnl_unlock();
2606
2607 efx_sriov_fini(efx);
2608 efx_unregister_netdev(efx);
2609
2610 efx_mtd_remove(efx);
2611
2612 efx_pci_remove_main(efx);
2613
2614 efx_fini_io(efx);
2615 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2616
2617 efx_fini_struct(efx);
2618 pci_set_drvdata(pci_dev, NULL);
2619 free_netdev(efx->net_dev);
2620
2621 pci_disable_pcie_error_reporting(pci_dev);
2622 }
2623
2624 /* NIC VPD information
2625 * Called during probe to display the part number of the
2626 * installed NIC. VPD is potentially very large but the part
2627 * number should always appear within the first 512 bytes.
2628 */
2629 #define SFC_VPD_LEN 512
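/* VPD layout sketch (per the PCI specification, for orientation): a
* large-resource data type starts with a tag byte plus a 16-bit length
* (PCI_VPD_LRDT_TAG_SIZE == 3); within the read-only section each
* field is a two-character keyword such as "PN" plus a one-byte length
* (PCI_VPD_INFO_FLD_HDR_SIZE == 3), followed by the field data.
*/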
2630 static void efx_print_product_vpd(struct efx_nic *efx)
2631 {
2632 struct pci_dev *dev = efx->pci_dev;
2633 char vpd_data[SFC_VPD_LEN];
2634 ssize_t vpd_size;
2635 int i, j;
2636
2637 /* Get the vpd data from the device */
2638 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
2639 if (vpd_size <= 0) {
2640 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
2641 return;
2642 }
2643
2644 /* Get the Read only section */
2645 i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
2646 if (i < 0) {
2647 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
2648 return;
2649 }
2650
2651 j = pci_vpd_lrdt_size(&vpd_data[i]);
2652 i += PCI_VPD_LRDT_TAG_SIZE;
2653 if (i + j > vpd_size)
2654 j = vpd_size - i;
2655
2656 /* Get the Part number */
2657 i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
2658 if (i < 0) {
2659 netif_err(efx, drv, efx->net_dev, "Part number not found\n");
2660 return;
2661 }
2662
2663 j = pci_vpd_info_field_size(&vpd_data[i]);
2664 i += PCI_VPD_INFO_FLD_HDR_SIZE;
2665 if (i + j > vpd_size) {
2666 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
2667 return;
2668 }
2669
2670 netif_info(efx, drv, efx->net_dev,
2671 "Part Number : %.*s\n", j, &vpd_data[i]);
2672 }
2673
2674
2675 /* Main body of NIC initialisation
2676 * This is called at module load (or hotplug insertion, theoretically).
2677 */
2678 static int efx_pci_probe_main(struct efx_nic *efx)
2679 {
2680 int rc;
2681
2682 /* Do start-of-day initialisation */
2683 rc = efx_probe_all(efx);
2684 if (rc)
2685 goto fail1;
2686
2687 efx_init_napi(efx);
2688
2689 rc = efx->type->init(efx);
2690 if (rc) {
2691 netif_err(efx, probe, efx->net_dev,
2692 "failed to initialise NIC\n");
2693 goto fail3;
2694 }
2695
2696 rc = efx_init_port(efx);
2697 if (rc) {
2698 netif_err(efx, probe, efx->net_dev,
2699 "failed to initialise port\n");
2700 goto fail4;
2701 }
2702
2703 rc = efx_nic_init_interrupt(efx);
2704 if (rc)
2705 goto fail5;
2706 efx_start_interrupts(efx, false);
2707
2708 return 0;
2709
2710 fail5:
2711 efx_fini_port(efx);
2712 fail4:
2713 efx->type->fini(efx);
2714 fail3:
2715 efx_fini_napi(efx);
2716 efx_remove_all(efx);
2717 fail1:
2718 return rc;
2719 }
2720
2721 /* NIC initialisation
2722 *
2723 * This is called at module load (or hotplug insertion,
2724 * theoretically). It sets up PCI mappings, resets the NIC,
2725 * sets up and registers the network devices with the kernel and hooks
2726 * the interrupt service routine. It does not prepare the device for
2727 * transmission; this is left to the first time one of the network
2728 * interfaces is brought up (i.e. efx_net_open).
2729 */
2730 static int efx_pci_probe(struct pci_dev *pci_dev,
2731 const struct pci_device_id *entry)
2732 {
2733 struct net_device *net_dev;
2734 struct efx_nic *efx;
2735 int rc;
2736
2737 /* Allocate and initialise a struct net_device and struct efx_nic */
2738 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
2739 EFX_MAX_RX_QUEUES);
2740 if (!net_dev)
2741 return -ENOMEM;
2742 efx = netdev_priv(net_dev);
2743 efx->type = (const struct efx_nic_type *) entry->driver_data;
2744 net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2745 NETIF_F_HIGHDMA | NETIF_F_TSO |
2746 NETIF_F_RXCSUM);
2747 if (efx->type->offload_features & NETIF_F_V6_CSUM)
2748 net_dev->features |= NETIF_F_TSO6;
2749 /* Mask for features that also apply to VLAN devices */
2750 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2751 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
2752 NETIF_F_RXCSUM);
2753 /* All offloads can be toggled (HIGHDMA is not an offload) */
2754 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
2755 pci_set_drvdata(pci_dev, efx);
2756 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2757 rc = efx_init_struct(efx, pci_dev, net_dev);
2758 if (rc)
2759 goto fail1;
2760
2761 netif_info(efx, probe, efx->net_dev,
2762 "Solarflare NIC detected\n");
2763
2764 efx_print_product_vpd(efx);
2765
2766 /* Set up basic I/O (BAR mappings etc) */
2767 rc = efx_init_io(efx);
2768 if (rc)
2769 goto fail2;
2770
2771 rc = efx_pci_probe_main(efx);
2772 if (rc)
2773 goto fail3;
2774
2775 rc = efx_register_netdev(efx);
2776 if (rc)
2777 goto fail4;
2778
2779 rc = efx_sriov_init(efx);
2780 if (rc)
2781 netif_err(efx, probe, efx->net_dev,
2782 "SR-IOV can't be enabled rc %d\n", rc);
2783
2784 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
2785
2786 /* Try to create MTDs, but allow this to fail */
2787 rtnl_lock();
2788 rc = efx_mtd_probe(efx);
2789 rtnl_unlock();
2790 if (rc)
2791 netif_warn(efx, probe, efx->net_dev,
2792 "failed to create MTDs (%d)\n", rc);
2793
2794 rc = pci_enable_pcie_error_reporting(pci_dev);
2795 if (rc && rc != -EINVAL)
2796 netif_warn(efx, probe, efx->net_dev,
2797 "pci_enable_pcie_error_reporting failed (%d)\n", rc);
2798
2799 return 0;
2800
2801 fail4:
2802 efx_pci_remove_main(efx);
2803 fail3:
2804 efx_fini_io(efx);
2805 fail2:
2806 efx_fini_struct(efx);
2807 fail1:
2808 pci_set_drvdata(pci_dev, NULL);
2809 WARN_ON(rc > 0);
2810 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
2811 free_netdev(net_dev);
2812 return rc;
2813 }
2814
2815 static int efx_pm_freeze(struct device *dev)
2816 {
2817 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2818
2819 rtnl_lock();
2820
2821 if (efx->state != STATE_DISABLED) {
2822 efx->state = STATE_UNINIT;
2823
2824 efx_device_detach_sync(efx);
2825
2826 efx_stop_all(efx);
2827 efx_stop_interrupts(efx, false);
2828 }
2829
2830 rtnl_unlock();
2831
2832 return 0;
2833 }
2834
2835 static int efx_pm_thaw(struct device *dev)
2836 {
2837 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2838
2839 rtnl_lock();
2840
2841 if (efx->state != STATE_DISABLED) {
2842 efx_start_interrupts(efx, false);
2843
2844 mutex_lock(&efx->mac_lock);
2845 efx->phy_op->reconfigure(efx);
2846 mutex_unlock(&efx->mac_lock);
2847
2848 efx_start_all(efx);
2849
2850 netif_device_attach(efx->net_dev);
2851
2852 efx->state = STATE_READY;
2853
2854 efx->type->resume_wol(efx);
2855 }
2856
2857 rtnl_unlock();
2858
2859 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
2860 queue_work(reset_workqueue, &efx->reset_work);
2861
2862 return 0;
2863 }
2864
2865 static int efx_pm_poweroff(struct device *dev)
2866 {
2867 struct pci_dev *pci_dev = to_pci_dev(dev);
2868 struct efx_nic *efx = pci_get_drvdata(pci_dev);
2869
2870 efx->type->fini(efx);
2871
2872 efx->reset_pending = 0;
2873
2874 pci_save_state(pci_dev);
2875 return pci_set_power_state(pci_dev, PCI_D3hot);
2876 }
2877
2878 /* Used for both resume and restore */
2879 static int efx_pm_resume(struct device *dev)
2880 {
2881 struct pci_dev *pci_dev = to_pci_dev(dev);
2882 struct efx_nic *efx = pci_get_drvdata(pci_dev);
2883 int rc;
2884
2885 rc = pci_set_power_state(pci_dev, PCI_D0);
2886 if (rc)
2887 return rc;
2888 pci_restore_state(pci_dev);
2889 rc = pci_enable_device(pci_dev);
2890 if (rc)
2891 return rc;
2892 pci_set_master(efx->pci_dev);
2893 rc = efx->type->reset(efx, RESET_TYPE_ALL);
2894 if (rc)
2895 return rc;
2896 rc = efx->type->init(efx);
2897 if (rc)
2898 return rc;
2899 efx_pm_thaw(dev);
2900 return 0;
2901 }
2902
2903 static int efx_pm_suspend(struct device *dev)
2904 {
2905 int rc;
2906
2907 efx_pm_freeze(dev);
2908 rc = efx_pm_poweroff(dev);
2909 if (rc)
2910 efx_pm_resume(dev);
2911 return rc;
2912 }
2913
2914 static const struct dev_pm_ops efx_pm_ops = {
2915 .suspend = efx_pm_suspend,
2916 .resume = efx_pm_resume,
2917 .freeze = efx_pm_freeze,
2918 .thaw = efx_pm_thaw,
2919 .poweroff = efx_pm_poweroff,
2920 .restore = efx_pm_resume,
2921 };
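
/* How the handlers above compose: suspend == freeze + poweroff, and
* resume/restore undo poweroff with a full RESET_TYPE_ALL before
* thawing, since hardware state is presumably not preserved across
* D3hot.
*/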
2922
2923 /* A PCI error affecting this device was detected.
2924 * At this point MMIO and DMA may be disabled.
2925 * Stop the software path and request a slot reset.
2926 */
2927 static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
2928 enum pci_channel_state state)
2929 {
2930 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
2931 struct efx_nic *efx = pci_get_drvdata(pdev);
2932
2933 if (state == pci_channel_io_perm_failure)
2934 return PCI_ERS_RESULT_DISCONNECT;
2935
2936 rtnl_lock();
2937
2938 if (efx->state != STATE_DISABLED) {
2939 efx->state = STATE_RECOVERY;
2940 efx->reset_pending = 0;
2941
2942 efx_device_detach_sync(efx);
2943
2944 efx_stop_all(efx);
2945 efx_stop_interrupts(efx, false);
2946
2947 status = PCI_ERS_RESULT_NEED_RESET;
2948 } else {
2949 /* If the interface is disabled we don't want to do anything
2950 * with it.
2951 */
2952 status = PCI_ERS_RESULT_RECOVERED;
2953 }
2954
2955 rtnl_unlock();
2956
2957 pci_disable_device(pdev);
2958
2959 return status;
2960 }
2961
2962 /* Fake a successful reset, which will be performed later in efx_io_resume(). */
2963 static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
2964 {
2965 struct efx_nic *efx = pci_get_drvdata(pdev);
2966 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
2967 int rc;
2968
2969 if (pci_enable_device(pdev)) {
2970 netif_err(efx, hw, efx->net_dev,
2971 "Cannot re-enable PCI device after reset.\n");
2972 status = PCI_ERS_RESULT_DISCONNECT;
2973 }
2974
2975 rc = pci_cleanup_aer_uncorrect_error_status(pdev);
2976 if (rc) {
2977 netif_err(efx, hw, efx->net_dev,
2978 "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
2979 /* Non-fatal error. Continue. */
2980 }
2981
2982 return status;
2983 }
2984
2985 /* Perform the actual reset and resume I/O operations. */
2986 static void efx_io_resume(struct pci_dev *pdev)
2987 {
2988 struct efx_nic *efx = pci_get_drvdata(pdev);
2989 int rc;
2990
2991 rtnl_lock();
2992
2993 if (efx->state == STATE_DISABLED)
2994 goto out;
2995
2996 rc = efx_reset(efx, RESET_TYPE_ALL);
2997 if (rc) {
2998 netif_err(efx, hw, efx->net_dev,
2999 "efx_reset failed after PCI error (%d)\n", rc);
3000 } else {
3001 efx->state = STATE_READY;
3002 netif_dbg(efx, hw, efx->net_dev,
3003 "Done resetting and resuming IO after PCI error.\n");
3004 }
3005
3006 out:
3007 rtnl_unlock();
3008 }
3009
3010 /* For simplicity and reliability, we always require a slot reset and try to
3011 * reset the hardware when a PCI error affecting the device is detected.
3012 * We leave both the link_reset and mmio_enabled callbacks unimplemented:
3013 * with our request for slot reset the mmio_enabled callback will never be
3014 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3015 */
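/* Resulting AER sequence (a summary of the callbacks below, not new
* behaviour): error_detected() detaches the device and returns
* PCI_ERS_RESULT_NEED_RESET; the core then resets the slot and calls
* slot_reset(), which re-enables the PCI device; finally resume()
* performs a RESET_TYPE_ALL and restarts the interface.
*/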
3016 static struct pci_error_handlers efx_err_handlers = {
3017 .error_detected = efx_io_error_detected,
3018 .slot_reset = efx_io_slot_reset,
3019 .resume = efx_io_resume,
3020 };
3021
3022 static struct pci_driver efx_pci_driver = {
3023 .name = KBUILD_MODNAME,
3024 .id_table = efx_pci_table,
3025 .probe = efx_pci_probe,
3026 .remove = efx_pci_remove,
3027 .driver.pm = &efx_pm_ops,
3028 .err_handler = &efx_err_handlers,
3029 };
3030
3031 /**************************************************************************
3032 *
3033 * Kernel module interface
3034 *
3035 *************************************************************************/
3036
3037 module_param(interrupt_mode, uint, 0444);
3038 MODULE_PARM_DESC(interrupt_mode,
3039 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
3040
3041 static int __init efx_init_module(void)
3042 {
3043 int rc;
3044
3045 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
3046
3047 rc = register_netdevice_notifier(&efx_netdev_notifier);
3048 if (rc)
3049 goto err_notifier;
3050
3051 rc = efx_init_sriov();
3052 if (rc)
3053 goto err_sriov;
3054
3055 reset_workqueue = create_singlethread_workqueue("sfc_reset");
3056 if (!reset_workqueue) {
3057 rc = -ENOMEM;
3058 goto err_reset;
3059 }
3060
3061 rc = pci_register_driver(&efx_pci_driver);
3062 if (rc < 0)
3063 goto err_pci;
3064
3065 return 0;
3066
3067 err_pci:
3068 destroy_workqueue(reset_workqueue);
3069 err_reset:
3070 efx_fini_sriov();
3071 err_sriov:
3072 unregister_netdevice_notifier(&efx_netdev_notifier);
3073 err_notifier:
3074 return rc;
3075 }
3076
3077 static void __exit efx_exit_module(void)
3078 {
3079 printk(KERN_INFO "Solarflare NET driver unloading\n");
3080
3081 pci_unregister_driver(&efx_pci_driver);
3082 destroy_workqueue(reset_workqueue);
3083 efx_fini_sriov();
3084 unregister_netdevice_notifier(&efx_netdev_notifier);
3085
3086 }
3087
3088 module_init(efx_init_module);
3089 module_exit(efx_exit_module);
3090
3091 MODULE_AUTHOR("Solarflare Communications and "
3092 "Michael Brown <mbrown@fensystems.co.uk>");
3093 MODULE_DESCRIPTION("Solarflare Communications network driver");
3094 MODULE_LICENSE("GPL");
3095 MODULE_DEVICE_TABLE(pci, efx_pci_table);