/* sfc: Rework IRQ enable/disable
 * drivers/net/ethernet/sfc/falcon.c
 */
1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/pci.h>
14 #include <linux/module.h>
15 #include <linux/seq_file.h>
16 #include <linux/i2c.h>
17 #include <linux/mii.h>
18 #include <linux/slab.h>
19 #include "net_driver.h"
20 #include "bitfield.h"
21 #include "efx.h"
22 #include "spi.h"
23 #include "nic.h"
24 #include "farch_regs.h"
25 #include "io.h"
26 #include "phy.h"
27 #include "workarounds.h"
28 #include "selftest.h"
29 #include "mdio_10g.h"
30
31 /* Hardware control for SFC4000 (aka Falcon). */
32
33 /**************************************************************************
34 *
35 * MAC stats DMA format
36 *
37 **************************************************************************
38 */
39
40 #define FALCON_MAC_STATS_SIZE 0x100
41
42 #define XgRxOctets_offset 0x0
43 #define XgRxOctets_WIDTH 48
44 #define XgRxOctetsOK_offset 0x8
45 #define XgRxOctetsOK_WIDTH 48
46 #define XgRxPkts_offset 0x10
47 #define XgRxPkts_WIDTH 32
48 #define XgRxPktsOK_offset 0x14
49 #define XgRxPktsOK_WIDTH 32
50 #define XgRxBroadcastPkts_offset 0x18
51 #define XgRxBroadcastPkts_WIDTH 32
52 #define XgRxMulticastPkts_offset 0x1C
53 #define XgRxMulticastPkts_WIDTH 32
54 #define XgRxUnicastPkts_offset 0x20
55 #define XgRxUnicastPkts_WIDTH 32
56 #define XgRxUndersizePkts_offset 0x24
57 #define XgRxUndersizePkts_WIDTH 32
58 #define XgRxOversizePkts_offset 0x28
59 #define XgRxOversizePkts_WIDTH 32
60 #define XgRxJabberPkts_offset 0x2C
61 #define XgRxJabberPkts_WIDTH 32
62 #define XgRxUndersizeFCSerrorPkts_offset 0x30
63 #define XgRxUndersizeFCSerrorPkts_WIDTH 32
64 #define XgRxDropEvents_offset 0x34
65 #define XgRxDropEvents_WIDTH 32
66 #define XgRxFCSerrorPkts_offset 0x38
67 #define XgRxFCSerrorPkts_WIDTH 32
68 #define XgRxAlignError_offset 0x3C
69 #define XgRxAlignError_WIDTH 32
70 #define XgRxSymbolError_offset 0x40
71 #define XgRxSymbolError_WIDTH 32
72 #define XgRxInternalMACError_offset 0x44
73 #define XgRxInternalMACError_WIDTH 32
74 #define XgRxControlPkts_offset 0x48
75 #define XgRxControlPkts_WIDTH 32
76 #define XgRxPausePkts_offset 0x4C
77 #define XgRxPausePkts_WIDTH 32
78 #define XgRxPkts64Octets_offset 0x50
79 #define XgRxPkts64Octets_WIDTH 32
80 #define XgRxPkts65to127Octets_offset 0x54
81 #define XgRxPkts65to127Octets_WIDTH 32
82 #define XgRxPkts128to255Octets_offset 0x58
83 #define XgRxPkts128to255Octets_WIDTH 32
84 #define XgRxPkts256to511Octets_offset 0x5C
85 #define XgRxPkts256to511Octets_WIDTH 32
86 #define XgRxPkts512to1023Octets_offset 0x60
87 #define XgRxPkts512to1023Octets_WIDTH 32
88 #define XgRxPkts1024to15xxOctets_offset 0x64
89 #define XgRxPkts1024to15xxOctets_WIDTH 32
90 #define XgRxPkts15xxtoMaxOctets_offset 0x68
91 #define XgRxPkts15xxtoMaxOctets_WIDTH 32
92 #define XgRxLengthError_offset 0x6C
93 #define XgRxLengthError_WIDTH 32
94 #define XgTxPkts_offset 0x80
95 #define XgTxPkts_WIDTH 32
96 #define XgTxOctets_offset 0x88
97 #define XgTxOctets_WIDTH 48
98 #define XgTxMulticastPkts_offset 0x90
99 #define XgTxMulticastPkts_WIDTH 32
100 #define XgTxBroadcastPkts_offset 0x94
101 #define XgTxBroadcastPkts_WIDTH 32
102 #define XgTxUnicastPkts_offset 0x98
103 #define XgTxUnicastPkts_WIDTH 32
104 #define XgTxControlPkts_offset 0x9C
105 #define XgTxControlPkts_WIDTH 32
106 #define XgTxPausePkts_offset 0xA0
107 #define XgTxPausePkts_WIDTH 32
108 #define XgTxPkts64Octets_offset 0xA4
109 #define XgTxPkts64Octets_WIDTH 32
110 #define XgTxPkts65to127Octets_offset 0xA8
111 #define XgTxPkts65to127Octets_WIDTH 32
112 #define XgTxPkts128to255Octets_offset 0xAC
113 #define XgTxPkts128to255Octets_WIDTH 32
114 #define XgTxPkts256to511Octets_offset 0xB0
115 #define XgTxPkts256to511Octets_WIDTH 32
116 #define XgTxPkts512to1023Octets_offset 0xB4
117 #define XgTxPkts512to1023Octets_WIDTH 32
118 #define XgTxPkts1024to15xxOctets_offset 0xB8
119 #define XgTxPkts1024to15xxOctets_WIDTH 32
120 #define XgTxPkts1519toMaxOctets_offset 0xBC
121 #define XgTxPkts1519toMaxOctets_WIDTH 32
122 #define XgTxUndersizePkts_offset 0xC0
123 #define XgTxUndersizePkts_WIDTH 32
124 #define XgTxOversizePkts_offset 0xC4
125 #define XgTxOversizePkts_WIDTH 32
126 #define XgTxNonTcpUdpPkt_offset 0xC8
127 #define XgTxNonTcpUdpPkt_WIDTH 16
128 #define XgTxMacSrcErrPkt_offset 0xCC
129 #define XgTxMacSrcErrPkt_WIDTH 16
130 #define XgTxIpSrcErrPkt_offset 0xD0
131 #define XgTxIpSrcErrPkt_WIDTH 16
132 #define XgDmaDone_offset 0xD4
133 #define XgDmaDone_WIDTH 32
134
135 #define FALCON_STATS_NOT_DONE 0x00000000
136 #define FALCON_STATS_DONE 0xffffffff
137
138 #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
139 #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
140
/* Retrieve statistic from statistics block.
 * Accumulates the DMAed counter @falcon_stat (declared above as 16, 32
 * or 64 bits wide) into (efx)->mac_stats.@efx_stat.  The __force casts
 * strip the sparse endianness annotation from the raw DMA buffer. */
#define FALCON_STAT(efx, falcon_stat, efx_stat) do {		\
	if (FALCON_STAT_WIDTH(falcon_stat) == 16)		\
		(efx)->mac_stats.efx_stat += le16_to_cpu(	\
			*((__force __le16 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	else if (FALCON_STAT_WIDTH(falcon_stat) == 32)		\
		(efx)->mac_stats.efx_stat += le32_to_cpu(	\
			*((__force __le32 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	else							\
		(efx)->mac_stats.efx_stat += le64_to_cpu(	\
			*((__force __le64 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
} while (0)
159
160 /**************************************************************************
161 *
162 * Non-volatile configuration
163 *
164 **************************************************************************
165 */
166
/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
struct falcon_nvconfig_board_v2 {
	__le16 nports;			/* number of ports on the board */
	u8 port0_phy_addr;		/* PHY address for port 0 */
	u8 port0_phy_type;		/* PHY type code for port 0 */
	u8 port1_phy_addr;		/* PHY address for port 1 */
	u8 port1_phy_type;		/* PHY type code for port 1 */
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;
177
/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	/* Per-device SPI type words; decoded with the SPI_DEV_TYPE_*
	 * field definitions below. */
	__le32 spi_device_type[2];
} __packed;
182
183 /* Bit numbers for spi_device_type */
184 #define SPI_DEV_TYPE_SIZE_LBN 0
185 #define SPI_DEV_TYPE_SIZE_WIDTH 5
186 #define SPI_DEV_TYPE_ADDR_LEN_LBN 6
187 #define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
188 #define SPI_DEV_TYPE_ERASE_CMD_LBN 8
189 #define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
190 #define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
191 #define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
192 #define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
193 #define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
194 #define SPI_DEV_TYPE_FIELD(type, field) \
195 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
196
197 #define FALCON_NVCONFIG_OFFSET 0x300
198
199 #define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Layout of the non-volatile configuration block, located at
 * FALCON_NVCONFIG_OFFSET.  The efx_oword_t members are presumably images
 * of the similarly-named hardware registers — TODO confirm against the
 * NVRAM layout specification.  Trailing hex comments are byte offsets. */
struct falcon_nvconfig {
	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];				/* 0x310 */
	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	efx_oword_t hw_init_reg;			/* 0x350 */
	efx_oword_t nic_stat_reg;			/* 0x360 */
	efx_oword_t glb_ctl_reg;			/* 0x370 */
	efx_oword_t srm_cfg_reg;			/* 0x380 */
	efx_oword_t spare_reg;				/* 0x390 */
	__le16 board_magic_num;				/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;
218
219 /*************************************************************************/
220
221 static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
222 static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
223
/* Pre-encoded SPI device type words (see SPI_DEV_TYPE_* fields above) */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
237
238 /**************************************************************************
239 *
240 * I2C bus - this is a bit-bashing interface using GPIO pins
241 * Note that it uses the output enables to tristate the outputs
242 * SDA is the data pin and SCL is the clock
243 *
244 **************************************************************************
245 */
246 static void falcon_setsda(void *data, int state)
247 {
248 struct efx_nic *efx = (struct efx_nic *)data;
249 efx_oword_t reg;
250
251 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
252 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
253 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
254 }
255
256 static void falcon_setscl(void *data, int state)
257 {
258 struct efx_nic *efx = (struct efx_nic *)data;
259 efx_oword_t reg;
260
261 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
262 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
263 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
264 }
265
266 static int falcon_getsda(void *data)
267 {
268 struct efx_nic *efx = (struct efx_nic *)data;
269 efx_oword_t reg;
270
271 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
272 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
273 }
274
275 static int falcon_getscl(void *data)
276 {
277 struct efx_nic *efx = (struct efx_nic *)data;
278 efx_oword_t reg;
279
280 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
281 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
282 }
283
/* Bit-banging I2C algorithm, backed by the GPIO accessors above */
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda = falcon_setsda,
	.setscl = falcon_setscl,
	.getsda = falcon_getsda,
	.getscl = falcon_getscl,
	.udelay = 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout = DIV_ROUND_UP(HZ, 20),
};
293
/* Program the event-queue timer for @channel.  A non-zero
 * channel->irq_moderation selects interrupt hold-off mode with that
 * tick count; zero disables the timer entirely. */
static void falcon_push_irq_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	/* The timer command register has the same address on A and B
	 * revisions, so FR_BZ_TIMER_COMMAND_P0 is safe for both */
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
316
317 static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
318
/* Quiesce the datapath before flushing DMA queues */
static void falcon_prepare_flush(struct efx_nic *efx)
{
	/* Stop traffic through the MAC wrapper first */
	falcon_deconfigure_mac_wrapper(efx);

	/* Allow the TX and RX FIFOs to reach a packet boundary (~1ms
	 * without back-pressure), then drain the remainder at data
	 * path speed (negligible), with a healthy safety margin. */
	msleep(10);
}
328
329 /* Acknowledge a legacy interrupt from Falcon
330 *
331 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
332 *
333 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
334 * BIU. Interrupt acknowledge is read sensitive so must write instead
335 * (then read to ensure the BIU collector is flushed)
336 *
337 * NB most hardware supports MSI interrupts
338 */
inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	/* Per the comment above: the write (not a read) performs the
	 * acknowledge on rev <= A1, and the following dummy read flushes
	 * the write out of the BIU collector (SFC bug 3706 workaround).
	 * Do not reorder or remove either access. */
	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
347
348
/* Legacy (INTx) interrupt handler for Falcon rev A1.  The interrupt
 * vector lives in a host buffer (efx->irq_status) rather than a
 * device register; a zero vector means the interrupt was not ours. */
irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt. If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* If interrupts are soft-disabled, claim the interrupt but do
	 * no further processing */
	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return efx_nic_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Only channels 0 and 1 are signalled through this vector */
	if (queues & 1)
		efx_schedule_channel_irq(efx_get_channel(efx, 0));
	if (queues & 2)
		efx_schedule_channel_irq(efx_get_channel(efx, 1));
	return IRQ_HANDLED;
}
393 /**************************************************************************
394 *
395 * EEPROM/flash
396 *
397 **************************************************************************
398 */
399
400 #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
401
402 static int falcon_spi_poll(struct efx_nic *efx)
403 {
404 efx_oword_t reg;
405 efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
406 return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
407 }
408
409 /* Wait for SPI command completion */
410 static int falcon_spi_wait(struct efx_nic *efx)
411 {
412 /* Most commands will finish quickly, so we start polling at
413 * very short intervals. Sometimes the command may have to
414 * wait for VPD or expansion ROM access outside of our
415 * control, so we allow up to 100 ms. */
416 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
417 int i;
418
419 for (i = 0; i < 10; i++) {
420 if (!falcon_spi_poll(efx))
421 return 0;
422 udelay(10);
423 }
424
425 for (;;) {
426 if (!falcon_spi_poll(efx))
427 return 0;
428 if (time_after_eq(jiffies, timeout)) {
429 netif_err(efx, hw, efx->net_dev,
430 "timed out waiting for SPI\n");
431 return -ETIMEDOUT;
432 }
433 schedule_timeout_uninterruptible(1);
434 }
435 }
436
/* Issue a single command to the SPI device @spi via the host SPI
 * interface, optionally transferring data.
 * @command: SPI opcode (possibly munged via efx_spi_munge_command())
 * @address: device address, or negative for address-less commands
 * @in: data to send with the command, or NULL
 * @out: buffer for data read back, or NULL
 * @len: data length; at most FALCON_SPI_MAX_LEN bytes
 * Return: 0 on success, -EINVAL for oversized @len, -EBUSY if a
 * previous command is still running, or -ETIMEDOUT on timeout.
 */
int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command; writing CMD_EN starts the transfer */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
492
493 static size_t
494 falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
495 {
496 return min(FALCON_SPI_MAX_LEN,
497 (spi->block_size - (start & (spi->block_size - 1))));
498 }
499
500 static inline u8
501 efx_spi_munge_command(const struct efx_spi_device *spi,
502 const u8 command, const unsigned int address)
503 {
504 return command | (((address >> 8) & spi->munge_address) << 3);
505 }
506
507 /* Wait up to 10 ms for buffered write completion */
508 int
509 falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
510 {
511 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
512 u8 status;
513 int rc;
514
515 for (;;) {
516 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
517 &status, sizeof(status));
518 if (rc)
519 return rc;
520 if (!(status & SPI_STATUS_NRDY))
521 return 0;
522 if (time_after_eq(jiffies, timeout)) {
523 netif_err(efx, hw, efx->net_dev,
524 "SPI write timeout on device %d"
525 " last status=0x%02x\n",
526 spi->device_id, status);
527 return -ETIMEDOUT;
528 }
529 schedule_timeout_uninterruptible(1);
530 }
531 }
532
533 int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
534 loff_t start, size_t len, size_t *retlen, u8 *buffer)
535 {
536 size_t block_len, pos = 0;
537 unsigned int command;
538 int rc = 0;
539
540 while (pos < len) {
541 block_len = min(len - pos, FALCON_SPI_MAX_LEN);
542
543 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
544 rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
545 buffer + pos, block_len);
546 if (rc)
547 break;
548 pos += block_len;
549
550 /* Avoid locking up the system */
551 cond_resched();
552 if (signal_pending(current)) {
553 rc = -EINTR;
554 break;
555 }
556 }
557
558 if (retlen)
559 *retlen = pos;
560 return rc;
561 }
562
563 int
564 falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
565 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
566 {
567 u8 verify_buffer[FALCON_SPI_MAX_LEN];
568 size_t block_len, pos = 0;
569 unsigned int command;
570 int rc = 0;
571
572 while (pos < len) {
573 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
574 if (rc)
575 break;
576
577 block_len = min(len - pos,
578 falcon_spi_write_limit(spi, start + pos));
579 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
580 rc = falcon_spi_cmd(efx, spi, command, start + pos,
581 buffer + pos, NULL, block_len);
582 if (rc)
583 break;
584
585 rc = falcon_spi_wait_write(efx, spi);
586 if (rc)
587 break;
588
589 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
590 rc = falcon_spi_cmd(efx, spi, command, start + pos,
591 NULL, verify_buffer, block_len);
592 if (memcmp(verify_buffer, buffer + pos, block_len)) {
593 rc = -EIO;
594 break;
595 }
596
597 pos += block_len;
598
599 /* Avoid locking up the system */
600 cond_resched();
601 if (signal_pending(current)) {
602 rc = -EINTR;
603 break;
604 }
605 }
606
607 if (retlen)
608 *retlen = pos;
609 return rc;
610 }
611
612 /**************************************************************************
613 *
614 * XMAC operations
615 *
616 **************************************************************************
617 */
618
/* Configure the XAUI driver that is an output from Falcon.
 * Programs default drive strength and de-emphasis on all four XAUI
 * lanes (A-D). */
static void falcon_setup_xaui(struct efx_nic *efx)
{
	efx_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	/* Default high/low drive strength for each lane */
	efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	/* Default TX driver de-emphasis and amplitude for each lane */
	EFX_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
651
652 int falcon_reset_xaui(struct efx_nic *efx)
653 {
654 struct falcon_nic_data *nic_data = efx->nic_data;
655 efx_oword_t reg;
656 int count;
657
658 /* Don't fetch MAC statistics over an XMAC reset */
659 WARN_ON(nic_data->stats_disable_count == 0);
660
661 /* Start reset sequence */
662 EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
663 efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
664
665 /* Wait up to 10 ms for completion, then reinitialise */
666 for (count = 0; count < 1000; count++) {
667 efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
668 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
669 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
670 falcon_setup_xaui(efx);
671 return 0;
672 }
673 udelay(10);
674 }
675 netif_err(efx, hw, efx->net_dev,
676 "timed out waiting for XAUI/XGXS reset\n");
677 return -ETIMEDOUT;
678 }
679
/* Acknowledge the XMAC management status interrupt, when it is safe
 * and useful to do so. */
static void falcon_ack_status_intr(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	/* Only rev B0 uses this interrupt, and not in internal loopback */
	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	/* NOTE(review): the value is discarded — this read appears to
	 * be what acknowledges the latched status (read-to-clear);
	 * confirm against the XM_MGT_INT_MSK register specification. */
	efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
699
/* Check MAC-side XGXS link state: true when lane alignment is done and
 * all lanes report sync.  Also clears the latched per-lane status
 * fields so the next call sees fresh state. */
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}
722
723 static bool falcon_xmac_link_ok(struct efx_nic *efx)
724 {
725 /*
726 * Check MAC's XGXS link status except when using XGMII loopback
727 * which bypasses the XGXS block.
728 * If possible, check PHY's XGXS link status except when using
729 * MAC loopback.
730 */
731 return (efx->loopback_mode == LOOPBACK_XGMII ||
732 falcon_xgxs_link_ok(efx)) &&
733 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
734 LOOPBACK_INTERNAL(efx) ||
735 efx_mdio_phyxgxs_lane_sync(efx));
736 }
737
/* Program the XMAC core from current state: global config, TX/RX
 * parameters, flow control, maximum frame lengths and the station
 * MAC address. */
static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
	unsigned int max_frame_len;
	efx_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);

	/* Configure MAC - cut-thru mode is hard wired on */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX; unicast filtering is opened up in promiscuous
	 * mode, multicast is always accepted here (filtered later) */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	efx_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address: low 4 bytes then high 2 bytes */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
791
/* Program the XGXS block for the current loopback mode, resetting it
 * first where the silicon requires it (workaround 5147). */
static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);

	/* XGXS block is flaky and will need to be reset if moving
	 * into or out of XGMII, XGXS or XAUI loopbacks. */
	if (EFX_WORKAROUND_5147(efx)) {
		bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
		bool reset_xgxs;

		efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
		old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
		old_xgmii_loopback =
			EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

		efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
		old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

		/* The PHY driver may have turned XAUI off */
		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
			      (xaui_loopback != old_xaui_loopback) ||
			      (xgmii_loopback != old_xgmii_loopback));

		if (reset_xgxs)
			falcon_reset_xaui(efx);
	}

	/* Force signal detect in any MAC-side loopback */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	/* XAUI loopback is enabled per-lane (A-D) */
	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
837
838
839 /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
840 static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
841 {
842 bool mac_up = falcon_xmac_link_ok(efx);
843
844 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
845 efx_phy_mode_disabled(efx->phy_mode))
846 /* XAUI link is expected to be down */
847 return mac_up;
848
849 falcon_stop_nic_stats(efx);
850
851 while (!mac_up && tries) {
852 netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
853 falcon_reset_xaui(efx);
854 udelay(200);
855
856 mac_up = falcon_xmac_link_ok(efx);
857 --tries;
858 }
859
860 falcon_start_nic_stats(efx);
861
862 return mac_up;
863 }
864
865 static bool falcon_xmac_check_fault(struct efx_nic *efx)
866 {
867 return !falcon_xmac_link_ok_retry(efx, 5);
868 }
869
870 static int falcon_reconfigure_xmac(struct efx_nic *efx)
871 {
872 struct falcon_nic_data *nic_data = efx->nic_data;
873
874 falcon_reconfigure_xgxs_core(efx);
875 falcon_reconfigure_xmac_core(efx);
876
877 falcon_reconfigure_mac_wrapper(efx);
878
879 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
880 falcon_ack_status_intr(efx);
881
882 return 0;
883 }
884
/* Accumulate the DMAed XMAC statistics block into efx->mac_stats and
 * recompute the derived byte counters. */
static void falcon_update_stats_xmac(struct efx_nic *efx)
{
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	/* Update MAC stats from DMAed values */
	FALCON_STAT(efx, XgRxOctets, rx_bytes);
	FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
	FALCON_STAT(efx, XgRxPkts, rx_packets);
	FALCON_STAT(efx, XgRxPktsOK, rx_good);
	FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
	FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
	FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
	FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
	FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
	FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
	FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
	FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
	FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
	FALCON_STAT(efx, XgRxAlignError, rx_align_error);
	FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
	FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
	FALCON_STAT(efx, XgRxControlPkts, rx_control);
	FALCON_STAT(efx, XgRxPausePkts, rx_pause);
	FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
	FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
	FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
	FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
	FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
	FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
	FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
	FALCON_STAT(efx, XgRxLengthError, rx_length_error);
	FALCON_STAT(efx, XgTxPkts, tx_packets);
	FALCON_STAT(efx, XgTxOctets, tx_bytes);
	FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
	FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
	FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
	FALCON_STAT(efx, XgTxControlPkts, tx_control);
	FALCON_STAT(efx, XgTxPausePkts, tx_pause);
	FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
	FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
	FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
	FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
	FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
	FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
	FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
	FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
	FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
	FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
	FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
	FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);

	/* Update derived statistics; control frames are counted as a
	 * nominal 64 bytes each */
	efx_update_diff_stat(&mac_stats->tx_good_bytes,
			     mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
			     mac_stats->tx_control * 64);
	efx_update_diff_stat(&mac_stats->rx_bad_bytes,
			     mac_stats->rx_bytes - mac_stats->rx_good_bytes -
			     mac_stats->rx_control * 64);
}
944
945 static void falcon_poll_xmac(struct efx_nic *efx)
946 {
947 struct falcon_nic_data *nic_data = efx->nic_data;
948
949 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
950 !nic_data->xmac_poll_required)
951 return;
952
953 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
954 falcon_ack_status_intr(efx);
955 }
956
957 /**************************************************************************
958 *
959 * MAC wrapper
960 *
961 **************************************************************************
962 */
963
964 static void falcon_push_multicast_hash(struct efx_nic *efx)
965 {
966 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
967
968 WARN_ON(!mutex_is_locked(&efx->mac_lock));
969
970 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
971 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
972 }
973
/* Reset the MACs.  On pre-B0 silicon, try the XMAC's internal core
 * reset first; on all revisions, drain the TX FIFO and pulse the
 * XGTX/XGRX/EM reset bits in GLB_CTL, then restore MAC_CTRL and
 * re-initialise the XAUI link.  Caller must have stats DMA disabled
 * (stats_disable_count != 0). */
static void falcon_reset_macs(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg, mac_ctrl;
	int count;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		/* Poll for the self-clearing core-reset bit; up to
		 * 10000 * 10us = 100ms */
		for (count = 0; count < 10000; count++) {
			efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
			    0)
				return;
			udelay(10);
		}

		/* Fall through to the GLB_CTL reset below as a last resort */
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* Mac stats will fail whist the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Enable TX FIFO drain; keep the original MAC_CTRL value in
	 * mac_ctrl so it can be restored after the reset */
	efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	/* Assert reset on the XGMII TX/RX and EM blocks */
	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Wait for all three reset bits to self-clear (up to ~210us) */
	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}
1037
1038 static void falcon_drain_tx_fifo(struct efx_nic *efx)
1039 {
1040 efx_oword_t reg;
1041
1042 if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
1043 (efx->loopback_mode != LOOPBACK_NONE))
1044 return;
1045
1046 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1047 /* There is no point in draining more than once */
1048 if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
1049 return;
1050
1051 falcon_reset_macs(efx);
1052 }
1053
1054 static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1055 {
1056 efx_oword_t reg;
1057
1058 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1059 return;
1060
1061 /* Isolate the MAC -> RX */
1062 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1063 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
1064 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1065
1066 /* Isolate TX -> MAC */
1067 falcon_drain_tx_fifo(efx);
1068 }
1069
/* Program MAC_CTRL and RX_CFG from the current link state.  If a reset
 * is pending, keep the MAC isolated (TX FIFO draining, RX ingress off
 * on B0+).  Also restores the multicast hash registers, which may have
 * been lost across a MAC reset. */
static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed, isolate;

	isolate = !!ACCESS_ONCE(efx->reset_pending);

	/* Encode link speed for the FRF_AB_MAC_SPEED field */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000: link_speed = 2; break;
	case 100: link_speed = 1; break;
	default: link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised. Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
1115
/* Kick off a MAC statistics DMA into efx->stats_buffer and arm the
 * completion timer.  Must not be called with a request outstanding or
 * while stats are disabled (both WARNed). */
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */

	/* Clear the generation/done marker the hardware will overwrite */
	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	/* Poll for completion in at most half a second */
	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
1140
/* Harvest a completed stats DMA: if the hardware wrote the done
 * marker, fold the DMA buffer into the MAC stats; otherwise log a
 * timeout.  No-op if no request is pending. */
static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
		falcon_update_stats_xmac(efx);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}
1157
1158 static void falcon_stats_timer_func(unsigned long context)
1159 {
1160 struct efx_nic *efx = (struct efx_nic *)context;
1161 struct falcon_nic_data *nic_data = efx->nic_data;
1162
1163 spin_lock(&efx->stats_lock);
1164
1165 falcon_stats_complete(efx);
1166 if (nic_data->stats_disable_count == 0)
1167 falcon_stats_request(efx);
1168
1169 spin_unlock(&efx->stats_lock);
1170 }
1171
1172 static bool falcon_loopback_link_poll(struct efx_nic *efx)
1173 {
1174 struct efx_link_state old_state = efx->link_state;
1175
1176 WARN_ON(!mutex_is_locked(&efx->mac_lock));
1177 WARN_ON(!LOOPBACK_INTERNAL(efx));
1178
1179 efx->link_state.fd = true;
1180 efx->link_state.fc = efx->wanted_fc;
1181 efx->link_state.up = true;
1182 efx->link_state.speed = 10000;
1183
1184 return !efx_link_state_equal(&efx->link_state, &old_state);
1185 }
1186
/* Reconfigure the whole port: poll link state, quiesce stats and the
 * MAC wrapper, reset the MACs, then reconfigure PHY and XMAC and
 * re-enable stats.  Always returns 0; an XMAC reconfigure failure is
 * treated as fatal (BUG_ON). */
static int falcon_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	/* Stats DMA must be stopped before the MAC reset */
	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	efx_link_status_changed(efx);

	return 0;
}
1218
1219 /* TX flow control may automatically turn itself off if the link
1220 * partner (intermittently) stops responding to pause frames. There
1221 * isn't any indication that this has happened, so the best we do is
1222 * leave it up to the user to spot this and fix it by cycling transmit
1223 * flow control on this end.
1224 */
1225
/* A1: recover stuck TX flow control by scheduling an invisible reset. */
static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
{
	/* Schedule a reset to recover */
	efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}
1231
/* B0: recover stuck TX flow control without a full reset, by draining
 * the TX FIFO (which resets the EM block) and reconfiguring the XMAC.
 * Stats DMA is paused around the sequence. */
static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}
1240
1241 /**************************************************************************
1242 *
1243 * PHY access via GMII
1244 *
1245 **************************************************************************
1246 */
1247
1248 /* Wait for GMII access to complete */
1249 static int falcon_gmii_wait(struct efx_nic *efx)
1250 {
1251 efx_oword_t md_stat;
1252 int count;
1253
1254 /* wait up to 50ms - taken max from datasheet */
1255 for (count = 0; count < 5000; count++) {
1256 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
1257 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
1258 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
1259 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
1260 netif_err(efx, hw, efx->net_dev,
1261 "error from GMII access "
1262 EFX_OWORD_FMT"\n",
1263 EFX_OWORD_VAL(md_stat));
1264 return -EIO;
1265 }
1266 return 0;
1267 }
1268 udelay(10);
1269 }
1270 netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
1271 return -ETIMEDOUT;
1272 }
1273
/* Write an MDIO register of a PHY connected to Falcon.
 * Sequence: wait for bus idle, program address/ID registers, load the
 * data, then pulse WRC and wait for completion; on timeout the write
 * is aborted via the GC bit.  Returns 0 or a negative errno. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		   prtad, devad, addr, value);

	/* Serialise all MDIO accesses */
	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Start the write cycle */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1326
/* Read an MDIO register of a PHY connected to Falcon.
 * Mirrors falcon_mdio_write(): wait for bus idle, program the
 * address/ID registers, pulse RDC, then read MD_RXD; on timeout the
 * read is aborted via the GC bit.  Returns the (non-negative)
 * register value or a negative errno. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	/* Serialise all MDIO accesses */
	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1378
/* This call is responsible for hooking in the MAC and PHY operations */
/* Selects phy_op by PHY type, wires up the MDIO callbacks, sets the
 * initial link assumptions and flow-control defaults, and allocates
 * the DMA buffer used for MAC statistics.  Returns 0 or a negative
 * errno (-ENODEV for an unknown PHY type). */
static int falcon_probe_port(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EFX_FC_AUTO;

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));
	nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;

	return 0;
}
1436
/* Undo falcon_probe_port(): remove the PHY and free the stats buffer. */
static void falcon_remove_port(struct efx_nic *efx)
{
	efx->phy_op->remove(efx);
	efx_nic_free_buffer(efx, &efx->stats_buffer);
}
1442
/* Global events are basically PHY events */
/* Returns true if the event was recognised and consumed; false lets
 * the caller's generic event handling deal with it. */
static bool
falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	/* B0 XG management interrupt: flag the XMAC for polling */
	if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	/* RX_RECOVERY bit position differs between A1 and B0 silicon */
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}
1477
1478 /**************************************************************************
1479 *
1480 * Falcon test code
1481 *
1482 **************************************************************************/
1483
/* Read and validate the NVRAM configuration region from SPI flash (or,
 * failing that, EEPROM): check the board magic number, structure
 * version and 16-bit ones-complement-style checksum.  If @nvconfig_out
 * is non-NULL the validated nvconfig is copied into it.  Returns 0 on
 * success, -EINVAL on a validation failure, -EIO on a read failure,
 * -ENOMEM on allocation failure. */
static int
falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	/* Prefer flash; fall back to EEPROM */
	if (efx_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (efx_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  efx_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		/* v2/v3: checksum covers only the nvconfig structure */
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		/* v4+: checksum covers the whole region */
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	/* A valid image sums to 0xffff in the low 16 bits */
	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

out:
	kfree(region);
	return rc;
}
1555
/* Self-test hook: validate NVRAM without keeping its contents. */
static int falcon_test_nvram(struct efx_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}
1560
/* Registers exercised by falcon_b0_test_chip() via
 * efx_nic_test_registers().  NOTE(review): the oword appears to mask
 * the bits that may safely be toggled in each register during the
 * test — confirm against efx_nic_test_registers(). */
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
1599
/* B0 chip self-test: move into a loopback mode (XGMII preferred, so
 * the 312MHz clock from the PHY is available for the XMAC registers),
 * take the port down, run the register tests, then reset and bring
 * the port back up.  Returns 0 or the first error encountered. */
static int
falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	int rc, rc2;

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	efx_reset_down(efx, reset_method);

	/* Record +1 for pass, -1 for fail in the self-test results */
	tests->registers =
		efx_nic_test_registers(efx, falcon_b0_register_tests,
				       ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

	rc = falcon_reset_hw(efx, reset_method);
	rc2 = efx_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}
1629
1630 /**************************************************************************
1631 *
1632 * Device reset
1633 *
1634 **************************************************************************
1635 */
1636
1637 static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1638 {
1639 switch (reason) {
1640 case RESET_TYPE_RX_RECOVERY:
1641 case RESET_TYPE_RX_DESC_FETCH:
1642 case RESET_TYPE_TX_DESC_FETCH:
1643 case RESET_TYPE_TX_SKIP:
1644 /* These can occasionally occur due to hardware bugs.
1645 * We try to reset without disrupting the link.
1646 */
1647 return RESET_TYPE_INVISIBLE;
1648 default:
1649 return RESET_TYPE_ALL;
1650 }
1651 }
1652
1653 static int falcon_map_reset_flags(u32 *flags)
1654 {
1655 enum {
1656 FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
1657 ETH_RESET_OFFLOAD | ETH_RESET_MAC),
1658 FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
1659 FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
1660 };
1661
1662 if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
1663 *flags &= ~FALCON_RESET_WORLD;
1664 return RESET_TYPE_WORLD;
1665 }
1666
1667 if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
1668 *flags &= ~FALCON_RESET_ALL;
1669 return RESET_TYPE_ALL;
1670 }
1671
1672 if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
1673 *flags &= ~FALCON_RESET_INVISIBLE;
1674 return RESET_TYPE_INVISIBLE;
1675 }
1676
1677 return -EINVAL;
1678 }
1679
/* Resets NIC to known state. This routine must be called in process
 * context and is allowed to sleep. */
/* For RESET_TYPE_WORLD the PCI config space of both functions is saved
 * before the software reset and restored afterwards; other reset types
 * exclude the PCIe core, EEPROM/flash and (for INVISIBLE) the PHY from
 * the reset.  Caller must hold nic_data->spi_lock (see
 * falcon_reset_hw()).  Returns 0 or a negative errno. */
static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (efx_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	/* Give the hardware 50ms to complete the software reset */
	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (efx_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}
1762
/* Reset the NIC, serialised against SPI accesses which a concurrent
 * reset would corrupt. */
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = __falcon_reset_hw(efx, method);
	mutex_unlock(&nic_data->spi_lock);

	return rc;
}
1774
/* Periodic hardware monitor: check the board sensors (shutting the PHY
 * down to low-power mode on a fault), poll the link and reconfigure
 * the MACs if it changed, then service any pending XMAC poll.  Caller
 * must hold the MAC lock. */
static void falcon_monitor(struct efx_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __efx_reconfigure_port(efx);
		WARN_ON(rc);
	}

	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		/* Same quiesce/reset/reconfigure sequence as
		 * falcon_reconfigure_port() */
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = falcon_reconfigure_xmac(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		efx_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}
1812
/* Zeroes out the SRAM contents. This routine must be called in
 * process context and is allowed to sleep.
 * Wakes the SRAM via GPIO1, starts the hardware SRAM initialisation
 * and polls for completion (~16ms typical, up to 0.4s).  Returns 0 or
 * -ETIMEDOUT. */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
1855
1856 static void falcon_spi_device_init(struct efx_nic *efx,
1857 struct efx_spi_device *spi_device,
1858 unsigned int device_id, u32 device_type)
1859 {
1860 if (device_type != 0) {
1861 spi_device->device_id = device_id;
1862 spi_device->size =
1863 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
1864 spi_device->addr_len =
1865 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
1866 spi_device->munge_address = (spi_device->size == 1 << 9 &&
1867 spi_device->addr_len == 1);
1868 spi_device->erase_command =
1869 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
1870 spi_device->erase_size =
1871 1 << SPI_DEV_TYPE_FIELD(device_type,
1872 SPI_DEV_TYPE_ERASE_SIZE);
1873 spi_device->block_size =
1874 1 << SPI_DEV_TYPE_FIELD(device_type,
1875 SPI_DEV_TYPE_BLOCK_SIZE);
1876 } else {
1877 spi_device->size = 0;
1878 }
1879 }
1880
/* Extract non-volatile configuration */
/* Reads and validates NVRAM, then populates PHY type/address, the SPI
 * device descriptions (struct version >= 3), the permanent MAC address
 * and the board type.  Returns 0 or a negative errno. */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	/* v3+ carries exact SPI device types; otherwise the defaults
	 * from falcon_probe_spi_devices() remain in effect */
	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}
1922
/* Fixed SRAM layout: base addresses of the RX and TX descriptor caches. */
static void falcon_dimension_resources(struct efx_nic *efx)
{
	efx->rx_dc_base = 0x20000;
	efx->tx_dc_base = 0x26000;
}
1928
/* Probe all SPI devices on the NIC */
/* Determines which SPI device (flash or EEPROM) the NIC booted from,
 * or programs safe SPI clock dividers if it booted from internal ASIC
 * settings, then records a default device description for the boot
 * device.  Also initialises the SPI lock. */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	/* GPIO3 power-up value indicates a boot from an SPI device;
	 * SF_PRST then distinguishes flash from EEPROM */
	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	mutex_init(&nic_data->spi_lock);

	/* Record default device types; falcon_probe_nvconfig() may
	 * overwrite these with exact types from NVRAM (v3+) */
	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}
1972
/* Probe-time initialisation for the SFC4000: allocate the per-NIC state,
 * sanity-check the silicon revision, reset the hardware, allocate the
 * interrupt status (INT_KER) buffer, probe the SPI devices and
 * non-volatile configuration, and register the I2C bus, board support
 * and MAC statistics timer.
 *
 * Returns 0 on success or a negative error code.  On failure all
 * partially-acquired resources are released via the fall-through
 * fail1..fail6 labels below.
 */
static int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		/* PCI revision 0 or 0xff identifies unusable rev A0 silicon */
		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		/* Rev A1 is only supported with the 10G / PCI-E straps set */
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* Rev A1 exposes a second PCI function; search for the
		 * sibling device on the same bus at devfn + 1 and keep a
		 * reference to it.  pci_get_device() drops the reference
		 * on the "from" argument, so take an extra one on our
		 * own device before starting the walk.
		 */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	/* The interrupt status buffer must be 16-byte aligned */
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	efx->timer_quantum_ns = 4968; /* 621 cycles */

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Stats collection starts disabled; falcon_start_nic_stats()
	 * decrements this count to enable it.
	 */
	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

	/* Error path: unwind in reverse order of acquisition.  The
	 * fail4/fail3 and fail2/fail1 pairs deliberately fall through.
	 */
 fail6:
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
2107
2108 static void falcon_init_rx_cfg(struct efx_nic *efx)
2109 {
2110 /* RX control FIFO thresholds (32 entries) */
2111 const unsigned ctrl_xon_thr = 20;
2112 const unsigned ctrl_xoff_thr = 25;
2113 efx_oword_t reg;
2114
2115 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2116 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2117 /* Data FIFO size is 5.5K. The RX DMA engine only
2118 * supports scattering for user-mode queues, but will
2119 * split DMA writes at intervals of RX_USR_BUF_SIZE
2120 * (32-byte units) even for kernel-mode queues. We
2121 * set it to be so large that that never happens.
2122 */
2123 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2124 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
2125 (3 * 4096) >> 5);
2126 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
2127 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
2128 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2129 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2130 } else {
2131 /* Data FIFO size is 80K; register fields moved */
2132 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2133 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
2134 EFX_RX_USR_BUF_SIZE >> 5);
2135 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
2136 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
2137 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
2138 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
2139 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
2140 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2141
2142 /* Enable hash insertion. This is broken for the
2143 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
2144 * IPv4 hashes. */
2145 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
2146 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
2147 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
2148 }
2149 /* Always enable XOFF signal from RX FIFO. We enable
2150 * or disable transmission of pause frames at the MAC. */
2151 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
2152 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2153 }
2154
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 * Returns 0 on success or a negative error code from the SRAM reset.
 */
static int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	efx_reado(efx, &temp, FR_AB_NIC_STAT);
	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	efx_writeo(efx, &temp, FR_AB_NIC_STAT);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Limit filter search depth to 8 for all four filter types
	 * (workaround 7244)
	 */
	if (EFX_WORKAROUND_7244(efx)) {
		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* Set hash key for IPv4 */
		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

		/* Set destination of both TX and RX Flush events */
		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	/* Shared initialisation for all Falcon-architecture NICs */
	efx_nic_init_common(efx);

	return 0;
}
2225
2226 static void falcon_remove_nic(struct efx_nic *efx)
2227 {
2228 struct falcon_nic_data *nic_data = efx->nic_data;
2229 struct falcon_board *board = falcon_board(efx);
2230
2231 board->type->fini(efx);
2232
2233 /* Remove I2C adapter and clear it in preparation for a retry */
2234 i2c_del_adapter(&board->i2c_adap);
2235 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2236
2237 efx_nic_free_buffer(efx, &efx->irq_status);
2238
2239 __falcon_reset_hw(efx, RESET_TYPE_ALL);
2240
2241 /* Release the second function after the reset */
2242 if (nic_data->pci_dev2) {
2243 pci_dev_put(nic_data->pci_dev2);
2244 nic_data->pci_dev2 = NULL;
2245 }
2246
2247 /* Tear down the private nic state */
2248 kfree(efx->nic_data);
2249 efx->nic_data = NULL;
2250 }
2251
2252 static void falcon_update_nic_stats(struct efx_nic *efx)
2253 {
2254 struct falcon_nic_data *nic_data = efx->nic_data;
2255 efx_oword_t cnt;
2256
2257 if (nic_data->stats_disable_count)
2258 return;
2259
2260 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
2261 efx->n_rx_nodesc_drop_cnt +=
2262 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
2263
2264 if (nic_data->stats_pending &&
2265 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
2266 nic_data->stats_pending = false;
2267 rmb(); /* read the done flag before the stats */
2268 falcon_update_stats_xmac(efx);
2269 }
2270 }
2271
2272 void falcon_start_nic_stats(struct efx_nic *efx)
2273 {
2274 struct falcon_nic_data *nic_data = efx->nic_data;
2275
2276 spin_lock_bh(&efx->stats_lock);
2277 if (--nic_data->stats_disable_count == 0)
2278 falcon_stats_request(efx);
2279 spin_unlock_bh(&efx->stats_lock);
2280 }
2281
2282 void falcon_stop_nic_stats(struct efx_nic *efx)
2283 {
2284 struct falcon_nic_data *nic_data = efx->nic_data;
2285 int i;
2286
2287 might_sleep();
2288
2289 spin_lock_bh(&efx->stats_lock);
2290 ++nic_data->stats_disable_count;
2291 spin_unlock_bh(&efx->stats_lock);
2292
2293 del_timer_sync(&nic_data->stats_timer);
2294
2295 /* Wait enough time for the most recent transfer to
2296 * complete. */
2297 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
2298 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
2299 break;
2300 msleep(1);
2301 }
2302
2303 spin_lock_bh(&efx->stats_lock);
2304 falcon_stats_complete(efx);
2305 spin_unlock_bh(&efx->stats_lock);
2306 }
2307
2308 static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
2309 {
2310 falcon_board(efx)->type->set_id_led(efx, mode);
2311 }
2312
2313 /**************************************************************************
2314 *
2315 * Wake on LAN
2316 *
2317 **************************************************************************
2318 */
2319
2320 static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2321 {
2322 wol->supported = 0;
2323 wol->wolopts = 0;
2324 memset(&wol->sopass, 0, sizeof(wol->sopass));
2325 }
2326
2327 static int falcon_set_wol(struct efx_nic *efx, u32 type)
2328 {
2329 if (type != 0)
2330 return -EINVAL;
2331 return 0;
2332 }
2333
2334 /**************************************************************************
2335 *
2336 * Revision-dependent attributes used by efx.c and nic.c
2337 *
2338 **************************************************************************
2339 */
2340
/* Hardware interface operations and layout constants for the SFC4000
 * rev A1 (10G only; see falcon_probe_nic(), which rejects 1G and
 * PCI-X straps on this revision).  Register table bases use the
 * FR_AA_*_KER variants specific to this revision.
 */
const struct efx_nic_type falcon_a1_nic_type = {
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = efx_port_dummy_op_void,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,

	.revision = EFX_REV_FALCON_A1,
	.mem_map_size = 0x20000,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.can_rx_scatter = false,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
};
2385
/* Hardware interface operations and layout constants for the SFC4000
 * rev B0.  Differs from rev A1 in its register table bases (FR_BZ_*),
 * RX hash insertion support (see falcon_init_rx_cfg()), RX scatter,
 * MSI-X support and the chip self-test hook.
 */
const struct efx_nic_type falcon_b0_nic_type = {
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = efx_port_dummy_op_void,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = falcon_b0_test_chip,
	.test_nvram = falcon_test_nvram,

	.revision = EFX_REV_FALCON_B0,
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped.  */
	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
			 FR_BZ_RX_INDIRECTION_TBL_STEP *
			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_hash_size = 0x10,
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32,	/* Hardware limit is 64, but the legacy
					 * interrupt handler only supports 32
					 * channels */
	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
};
2439
This page took 0.131477 seconds and 6 git commands to generate.