/* Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK		0xfff8
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE	0x2294
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(1)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
#define MVNETA_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO	0x0667
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT	0x2484
#define MVNETA_OVERRUN_FRAME_COUNT	0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER	0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE	BIT(31)
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_BM_ADDRESS		0x2504
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))
/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated to a queue is not
 * set, then a read of the register from this CPU will always return 0
 * and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_INTR_NEW_MASK		0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK	BIT(31)

#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac
/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE	BIT(1)
#define MVNETA_CAUSE_PTP		BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR	BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT	BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK	(0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)	(1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE		0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0x000000ff
#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE	BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE	BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN	BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_MIB_COUNTERS_BASE	0x3000
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
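/* Worked example (illustrative, not part of the original source): with a
 * 128-entry ring, last_desc is 127, so MVNETA_QUEUE_NEXT_DESC(q, 126)
 * yields 127 and MVNETA_QUEUE_NEXT_DESC(q, 127) wraps back to 0.
 */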
/* Various constants */
#define MVNETA_TXDONE_COAL_PKTS		1
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2
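/* Worked example (illustrative): the 14-byte Ethernet header plus the
 * 2-byte Marvell header gives a 16-byte prefix, so the IP header that
 * follows it lands on a 4-byte boundary without any extra padding.
 */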
#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet.
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* TSO header size */
#define TSO_HEADER_SIZE			128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32
#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

#define MVNETA_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
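/* Worked example (illustrative): for the default MTU of 1500,
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 *                          = ALIGN(1524, 32) = 1536 bytes.
 */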
struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};
struct mvneta_pcpu_stats {
	struct	u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};
struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};
struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct notifier_block cpu_notifier;
	int rxq_def;

	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */
#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)
#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif
struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};
struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};
/* The hardware supports eight (8) rx queues and eight (8) tx queues;
 * both rings are allocated in full by default.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;
/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}
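/* Most register updates below follow a read-modify-write pattern built
 * on these two helpers, e.g. (illustrative sketch):
 *
 *	u32 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 *	val |= MVNETA_GMAC0_PORT_ENABLE;
 *	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 */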
/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;

	return stats;
}
/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}
/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once.
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
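/* Example (illustrative): adding 300 non-occupied descriptors results in
 * two register writes, first 255 (the per-write maximum) and then the
 * remaining 45.
 */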
/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256.
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}
/* Configure an MBUS window to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}
/* Assign and initialize pools for port. On failure, the buffer
 * manager remains disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id, wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}
/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}

	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only.
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable the port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}
/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}
/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}
/* Set all entries in Other Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
	u32 val;

	if (enable) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}
}
static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked.
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked.
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared.
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		for (txq = 0; txq < txq_number; txq++)
			if ((txq % max_cpu) == cpu)
				txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

		/* With only one TX queue we configure a special case
		 * which will allow to get all the irq on a single
		 * CPU.
		 */
		if (txq_number == 1)
			txq_map = (cpu == pp->rxq_def) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_autoneg(pp, pp->use_inband_status);
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}
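/* Example (illustrative): with 4 present CPUs and 8 RX queues, the modulo
 * assignment above gives CPU0 -> rxq 0 and 4, CPU1 -> rxq 1 and 5,
 * CPU2 -> rxq 2 and 6, CPU3 -> rxq 3 and 7; TX queues are distributed
 * the same way.
 */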
/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}
/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}
/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
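/* Worked example (illustrative, assuming a 250 MHz core clock): a
 * 100 usec coalescing delay is programmed as (250000000 / 1000000) * 100
 * = 25000 clock cycles.
 */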
/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}
/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
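/* Example (illustrative): decrementing 600 sent descriptors takes three
 * register writes: 255, 255 and then the remaining 90.
 */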
/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation.
	 */
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |=  MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
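/* Worked example (illustrative): for TCP over IPv4 with no VLAN tag,
 * l3_offs is 14 (the Ethernet header length) and ip_hdr_len is 5 (the
 * IP header length in 32-bit words, as the caller below passes it), so
 * the command word is 14 | (5 << 8) | MVNETA_TXD_IP_CSUM |
 * MVNETA_TX_L4_CSUM_FULL.
 */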
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}
/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from the tx_done reg. <cause> must not be null. The return value is always
 * a valid queue, matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}
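/* Example (illustrative): with cause = 0x6 (queues 1 and 2 pending),
 * fls(0x6) - 1 = 2, so TX queue 2 is returned first.
 */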
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}
}
/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}
void *mvneta_frag_alloc(unsigned int frag_size)
{
	if (likely(frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(frag_size);
	else
		return kmalloc(frag_size, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(mvneta_frag_alloc);

void mvneta_frag_free(unsigned int frag_size, void *data)
{
	if (likely(frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
EXPORT_SYMBOL_GPL(mvneta_frag_free);
/* Refill processing for SW buffer management */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp->frag_size);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp->frag_size, data);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
	return 0;
}
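/* Note: the buffer's kernel virtual address is stashed in the 32-bit
 * buf_cookie field (hence the (u32) cast above), and the RX path casts
 * it back when building the skb; this relies on 32-bit virtual
 * addresses on these SoCs.
 */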
/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	if (pp->bm_priv) {
		for (i = 0; i < rx_done; i++) {
			struct mvneta_rx_desc *rx_desc =
						  mvneta_rxq_next_desc_get(rxq);
			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
			struct mvneta_bm_pool *bm_pool;

			bm_pool = &pp->bm_priv->bm_pools[pool_id];
			/* Return dropped buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
		}
		return;
	}

	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = (void *)rx_desc->buf_cookie;

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
		mvneta_frag_free(pp->frag_size, data);
	}
}
/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (unsigned char *)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame;
		}

		frag_size = pp->frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(dev->dev.parent, phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size),
				 DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}
/* Main rx processing when using hardware buffer management */
static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct mvneta_bm_pool *bm_pool = NULL;
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;
		u8 pool_id;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (unsigned char *)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;
		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
		bm_pool = &pp->bm_priv->bm_pools[pool_id];

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame_ret_pool:
			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame_ret_pool;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			goto err_drop_frame_ret_pool;
		}

		frag_size = bm_pool->hwbm_pool.frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}
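
/*
 * Sketch of the HWBM bookkeeping above: every path that does not hand
 * the buffer to the stack returns it with mvneta_bm_pool_put_bp(), so
 * the hardware pool stays balanced; only the build_skb() path keeps
 * the buffer and asks hwbm_pool_refill() for a replacement. The pool
 * is looked up per descriptor via MVNETA_RX_GET_BM_POOL_ID().
 */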
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}
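
/*
 * Example of the header-slot arithmetic above, assuming the 128-byte
 * TSO_HEADER_SIZE from <net/tso.h>: for txq_put_index 3 the header is
 * built at tso_hdrs + 384 and the descriptor points at
 * tso_hdrs_phys + 384, so headers stay paired with their descriptors
 * without a separate DMA mapping per packet.
 */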
static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}
static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;

			desc_count++;
			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}
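
/*
 * Worked example for the descriptor accounting in mvneta_tx_tso():
 * a 9000-byte TCP payload with gso_size 1500 is cut into 6 segments;
 * each segment consumes one header descriptor plus at least one data
 * descriptor, so at least 12 free descriptors are needed, which is
 * what the tso_count_descs() check at the top guards against.
 */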
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}
/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq->count += frags;
		mvneta_txq_pend_desc_add(pp, txq, frags);

		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes  += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}
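
/*
 * The "frags" counter above doubles as the success flag: it holds the
 * number of descriptors consumed (header plus fragments, or whatever
 * mvneta_tx_tso() returned), and every failure path resets it to 0 so
 * that the out: block drops the skb and bumps tx_dropped instead of
 * advancing the pending-descriptor counter.
 */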
/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}
/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}
/* Compute crc8 of the specified address, using a unique algorithm,
 * according to hw spec, different than generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc = crc ^ (0x107 << j);
		}
	}

	return crc;
}
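
/*
 * The XOR constant 0x107 is the 9-bit CRC-8 polynomial
 * x^8 + x^2 + x + 1, reduced MSB-first one address byte at a time;
 * per the hw spec referenced above this must match the hash the
 * controller computes to index the Other Multicast table, which is
 * why the generic crc8 library cannot be used here.
 */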
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method set the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}
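
/*
 * Worked example of the SMC table layout: for last_byte 0x25 (entry
 * 37), tbl_offset is 9 and reg_offset is 1, so the entry lives in
 * bits 15:8 of the 10th 32-bit table register. Enabling it for queue
 * 2 writes 0x01 | (2 << 1) = 0x05 into that byte.
 */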
/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}
/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}
/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      pp->rxq_def);
				}
			}
		}
	}
}
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}
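
/*
 * Classic NAPI hand-off: the hard IRQ only masks the per-CPU line and
 * schedules the softirq; mvneta_poll() re-enables the interrupt with
 * enable_percpu_irq() once it completes within its budget.
 */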
static int mvneta_fixed_link_update(struct mvneta_port *pp,
				    struct phy_device *phy)
{
	struct fixed_phy_status status;
	struct fixed_phy_status changed = {};
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
		status.speed = SPEED_1000;
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		status.speed = SPEED_100;
	else
		status.speed = SPEED_10;
	status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
	changed.link = 1;
	changed.speed = 1;
	changed.duplex = 1;
	fixed_phy_update_state(phy, &status, &changed);
	return 0;
}
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	int rx_queue;
	struct mvneta_port *pp = netdev_priv(napi->dev);
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
		napi_complete(&port->napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
		if (pp->use_inband_status && (cause_misc &
				(MVNETA_CAUSE_PHY_STATUS_CHANGE |
				 MVNETA_CAUSE_LINK_CHANGE |
				 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
			mvneta_fixed_link_update(pp, pp->phy_dev);
		}
	}

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

	cause_rx_tx |= port->cause_rx_tx;

	if (rx_queue) {
		rx_queue = rx_queue - 1;
		if (pp->bm_priv)
			rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
		else
			rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
	}

	budget -= rx_done;

	if (budget > 0) {
		cause_rx_tx = 0;
		napi_complete(&port->napi);
		enable_percpu_irq(pp->dev->irq, 0);
	}

	port->cause_rx_tx = cause_rx_tx;
	return rx_done;
}
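
/*
 * Example of the RX-queue selection above: if cause_rx_tx reads
 * 0x0900, RX bits 0 and 3 are set (queues 0 and 3 have packets);
 * fls(0x09) is 4, so rx_queue becomes 3 and the highest pending queue
 * is served this poll. Any leftover cause bits are parked in
 * port->cause_rx_tx when the budget runs out.
 */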
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}
/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	if (!pp->bm_priv) {
		/* Fill RXQ with buffers from RX pool */
		mvneta_rxq_buf_size_set(pp, rxq,
					MVNETA_RX_BUF_SIZE(pp->pkt_size));
		mvneta_rxq_bm_disable(pp, rxq);
	} else {
		mvneta_rxq_bm_enable(pp, rxq);
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
	}

	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}
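
/*
 * Sizing note: with the default 128-descriptor ring and the 32-byte
 * MVNETA_DESC_ALIGNED_SIZE, the coherent allocation above is a single
 * 4 KB page, and the BUG_ON() checks the cache-line alignment the
 * descriptor DMA engine requires.
 */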
/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;
}
/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	int cpu;

	txq->size = pp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the free entries reaches
	 * the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (txq->descs == NULL)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
	if (txq->tso_hdrs == NULL) {
		kfree(txq->tx_skb);
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	/* Setup XPS mapping */
	if (txq_number > 1)
		cpu = txq->id % num_present_cpus();
	else
		cpu = pp->rxq_def % num_present_cpus();
	cpumask_set_cpu(cpu, &txq->affinity_mask);
	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);

	return 0;
}
/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	kfree(txq->tx_skb);

	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}
/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
	int cpu;

	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	/* Enable polling on the port */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

		napi_enable(&port->napi);
	}

	/* Unmask interrupts. It has to be done from each CPU */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);

	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE |
		    MVNETA_CAUSE_PSC_SYNC_CHANGE);

	phy_start(pp->phy_dev);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	unsigned int cpu;

	phy_stop(pp->phy_dev);

	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

		napi_disable(&port->napi);
	}

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
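
/*
 * Example: a requested MTU of 10000 is first clamped to 9676 (the
 * 9700-byte hardware limit minus 20, per the comment above), and any
 * value whose resulting RX packet size is not 8-byte aligned is then
 * rounded up via ALIGN() before being returned to the caller.
 */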
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mtu = mvneta_check_mtu_valid(dev, mtu);
	if (mtu < 0)
		return -EINVAL;

	dev->mtu = mtu;

	if (!netif_running(dev)) {
		if (pp->bm_priv)
			mvneta_bm_update_mtu(pp, mtu);

		netdev_update_features(dev);
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	mvneta_start_dev(pp);
	mvneta_port_up(pp);

	netdev_update_features(dev);

	return 0;
}
static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}
/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}
/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;
	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

	eth_commit_mac_addr_change(dev, addr);
	return 0;
}
static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = pp->phy_dev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed  = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			if (!pp->use_inband_status) {
				u32 val = mvreg_read(pp,
						  MVNETA_GMAC_AUTONEG_CONFIG);
				val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
				val |= MVNETA_GMAC_FORCE_LINK_PASS;
				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
					    val);
			}
			mvneta_port_up(pp);
		} else {
			if (!pp->use_inband_status) {
				u32 val = mvreg_read(pp,
						  MVNETA_GMAC_AUTONEG_CONFIG);
				val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
				val |= MVNETA_GMAC_FORCE_LINK_DOWN;
				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
					    val);
			}
			mvneta_port_down(pp);
		}
		phy_print_status(phydev);
	}
}
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->phy_dev = phy_dev;
	pp->link    = 0;
	pp->duplex  = 0;
	pp->speed   = 0;

	return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phy_disconnect(pp->phy_dev);
	pp->phy_dev = NULL;
}
static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}
/* Electing a CPU must be done in an atomic way: it should be done
 * after or before the removal/insertion of a CPU and this function is
 * not reentrant.
 */
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int elected_cpu = 0, max_cpu, cpu, i = 0;

	/* Use the cpu associated to the rxq when it is online, in all
	 * the other cases, use the cpu 0 which can't be offline.
	 */
	if (cpu_online(pp->rxq_def))
		elected_cpu = pp->rxq_def;

	max_cpu = num_present_cpus();

	for_each_online_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		if (cpu == elected_cpu)
			/* Map the default receive queue to the
			 * elected CPU
			 */
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue to
		 * the CPU bound to the default RX queue
		 */
		if (txq_number == 1)
			txq_map = (cpu == elected_cpu) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
		i++;
	}
}
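
/*
 * Mapping example for the election loop: with 4 present CPUs and the
 * default 8 RX queues, "rxq % max_cpu == cpu" stripes queues 1 and 5
 * onto CPU 1, queues 2 and 6 onto CPU 2, and so on, while the elected
 * CPU additionally owns rxq_def and, in the single-TXQ case, the TX
 * queue.
 */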
static int mvneta_percpu_notifier(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
					      cpu_notifier);
	int cpu = (unsigned long)hcpu, other_cpu;
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		spin_lock(&pp->lock);
		/* Configuring the driver for a new CPU while the
		 * driver is stopping is racy, so just avoid it.
		 */
		if (pp->is_stopped) {
			spin_unlock(&pp->lock);
			break;
		}
		netif_tx_stop_all_queues(pp->dev);

		/* We have to synchronise on the napi of each CPU
		 * except the one just being woken up
		 */
		for_each_online_cpu(other_cpu) {
			if (other_cpu != cpu) {
				struct mvneta_pcpu_port *other_port =
					per_cpu_ptr(pp->ports, other_cpu);

				napi_synchronize(&other_port->napi);
			}
		}

		/* Mask all ethernet port interrupts */
		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
		napi_enable(&port->napi);

		/* Enable per-CPU interrupts on the CPU that is
		 * brought up.
		 */
		smp_call_function_single(cpu, mvneta_percpu_enable,
					 pp, true);

		/* Enable per-CPU interrupt on the one CPU we care
		 * about.
		 */
		mvneta_percpu_elect(pp);

		/* Unmask all ethernet port interrupts */
		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
			    MVNETA_CAUSE_LINK_CHANGE |
			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
		netif_tx_start_all_queues(pp->dev);
		spin_unlock(&pp->lock);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		netif_tx_stop_all_queues(pp->dev);
		/* Thanks to this lock we are sure that any pending
		 * cpu election is done
		 */
		spin_lock(&pp->lock);
		/* Mask all ethernet port interrupts */
		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
		spin_unlock(&pp->lock);

		napi_synchronize(&port->napi);
		napi_disable(&port->napi);
		/* Disable per-CPU interrupts on the CPU that is
		 * brought down.
		 */
		smp_call_function_single(cpu, mvneta_percpu_disable,
					 pp, true);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Check if a new CPU must be elected now this one is down */
		spin_lock(&pp->lock);
		mvneta_percpu_elect(pp);
		spin_unlock(&pp->lock);
		/* Unmask all ethernet port interrupts */
		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
			    MVNETA_CAUSE_LINK_CHANGE |
			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
		netif_tx_start_all_queues(pp->dev);
		break;
	}

	return NOTIFY_OK;
}
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
				 MVNETA_DRIVER_NAME, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	/* Enable per-CPU interrupt on all the CPU to handle our RX
	 * queue interrupt
	 */
	on_each_cpu(mvneta_percpu_enable, pp, true);

	pp->is_stopped = false;
	/* Register a CPU notifier to handle the case where our CPU
	 * might be taken offline.
	 */
	register_cpu_notifier(&pp->cpu_notifier);

	/* In default link is down */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_irq;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_irq:
	free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}
/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Inform that we are stopping so we don't want to setup the
	 * driver for new CPUs in the notifiers. The code of the
	 * notifier for CPU online is protected by the same spinlock,
	 * so when we get the lock, the notifier work is done.
	 */
	spin_lock(&pp->lock);
	pp->is_stopped = true;
	spin_unlock(&pp->lock);

	mvneta_stop_dev(pp);
	mvneta_mdio_remove(pp);
	unregister_cpu_notifier(&pp->cpu_notifier);
	on_each_cpu(mvneta_percpu_disable, pp, true);
	free_percpu_irq(dev->irq, pp->ports);
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_gset(pp->phy_dev, cmd);
}
/* Set settings (phy address, speed) for ethtools */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct phy_device *phydev = pp->phy_dev;

	if (!phydev)
		return -ENODEV;

	if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
		u32 val;

		mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE);

		if (cmd->autoneg == AUTONEG_DISABLE) {
			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
		}

		pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE);
		netdev_info(pp->dev, "autoneg status set to %i\n",
			    pp->use_inband_status);

		if (netif_running(dev)) {
			mvneta_port_down(pp);
			mvneta_port_up(pp);
		}
	}

	return phy_ethtool_sset(pp->phy_dev, cmd);
}
/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs       = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low, val;
	u64 val64;
	int i;

	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			pp->ethtool_stats[i] += val;
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val64 = (u64)high << 32 | low;
			pp->ethtool_stats[i] += val64;
			break;
		}
	}
}
static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}
static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_synchronize(&pcpu_port->napi);
		napi_disable(&pcpu_port->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_enable(&pcpu_port->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}
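
/*
 * Only indir[0] is consumed here: the "RSS" support is really a
 * re-election of the default RX queue (and of the CPU that owns it),
 * so an ethtool -X indirection request ends up re-running
 * mvneta_percpu_elect() under pp->lock rather than programming a
 * hardware indirection table.
 */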
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link            = ethtool_op_get_link,
	.get_settings        = mvneta_ethtool_get_settings,
	.set_settings        = mvneta_ethtool_set_settings,
	.set_coalesce        = mvneta_ethtool_set_coalesce,
	.get_coalesce        = mvneta_ethtool_get_coalesce,
	.get_drvinfo         = mvneta_ethtool_get_drvinfo,
	.get_ringparam       = mvneta_ethtool_get_ringparam,
	.set_ringparam       = mvneta_ethtool_set_ringparam,
	.get_strings         = mvneta_ethtool_get_strings,
	.get_ethtool_stats   = mvneta_ethtool_get_stats,
	.get_sset_count      = mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc           = mvneta_ethtool_get_rxnfc,
	.get_rxfh            = mvneta_ethtool_get_rxfh,
	.set_rxfh            = mvneta_ethtool_set_rxfh,
};
/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
				GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
				GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	const char *managed;
	int tx_csum_limit;
	int phy_mode;
	int err;
	int cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		if (!of_phy_is_fixed_link(dn)) {
			dev_err(&pdev->dev, "no PHY specified\n");
			err = -ENODEV;
			goto err_free_irq;
		}

		err = of_phy_register_fixed_link(dn);
		if (err < 0) {
			dev_err(&pdev->dev, "cannot register fixed PHY\n");
			goto err_free_irq;
		}

		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		phy_node = of_node_get(dn);
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_put_phy_node;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	err = of_property_read_string(dn, "managed", &managed);
	pp->use_inband_status = (err == 0 &&
				 strcmp(managed, "in-band-status") == 0);
	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;

	pp->rxq_def = rxq_def;

	pp->indir[0] = rxq_def;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_put_phy_node;
	}

	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node && bm_node->data) {
		pp->bm_priv = bm_node->data;
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	for_each_present_cpu(cpu) {
		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
		port->pp = pp;
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_free_stats;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	if (pp->use_inband_status) {
		struct phy_device *phy = of_phy_find_device(dn);

		mvneta_fixed_link_update(pp, phy);

		put_device(&phy->mdio.dev);
	}

	return 0;

err_netdev:
	unregister_netdev(dev);
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
	}
err_free_stats:
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_put_phy_node:
	of_node_put(phy_node);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	of_node_put(pp->phy_node);

	/* Destroy the BM pools while pp is still valid, i.e. before
	 * the netdev (which holds it) is freed.
	 */
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
	}

	free_netdev(dev);

	return 0;
}
static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);