/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/cpu.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
                                          MVNETA_DEF_RXQ_ARP(q) | \
                                          MVNETA_DEF_RXQ_TCP(q) | \
                                          MVNETA_DEF_RXQ_UDP(q) | \
                                          MVNETA_DEF_RXQ_BPDU(q) | \
                                          MVNETA_TX_UNSET_ERR_SUM | \
                                          MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK BIT(31)
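/* Illustration (not part of the driver): for the default single-RX-queue
 * setup, MVNETA_RX_INTR_MASK(1) evaluates to ((1 << 1) - 1) << 8 = 0x100,
 * i.e. only bit 8 (RXQ 0 occupied) is unmasked, while
 * MVNETA_RX_INTR_MASK_ALL (0xff00) unmasks all eight RX queues at once.
 */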
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac
/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 /* note: neta says it's 0x000000FF */
#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
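/* Illustration (not part of the driver): for a ring sized at
 * MVNETA_MAX_RXD (128) entries, last_desc would be 127, so
 * MVNETA_QUEUE_NEXT_DESC(q, 126) yields 127 and
 * MVNETA_QUEUE_NEXT_DESC(q, 127) wraps back to 0; this macro is the
 * single place where ring wraparound is handled.
 */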
/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 1
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE 2

#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT 1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* TSO header size */
#define TSO_HEADER_SIZE 128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

#define MVNETA_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN, \
              MVNETA_CPU_D_CACHE_LINE_SIZE)

#define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_phys) && \
         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
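/* Illustration (not part of the driver): for a standard 1500-byte MTU,
 * assuming ETH_HLEN = 14 and ETH_FCS_LEN = 4,
 *
 *   1500 + 2 (MH) + 4 (VLAN) + 14 (Ethernet) + 4 (FCS) = 1524
 *   ALIGN(1524, 32)                                    = 1536
 *
 * so MVNETA_RX_PKT_SIZE(1500) = 1536, and MVNETA_RX_BUF_SIZE() then
 * adds NET_SKB_PAD bytes of headroom on top of that.
 */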
struct mvneta_statistic {
        unsigned short offset;
        unsigned short type;
        const char name[ETH_GSTRING_LEN];
};

#define T_REG_32 32
#define T_REG_64 64
static const struct mvneta_statistic mvneta_statistics[] = {
        { 0x3000, T_REG_64, "good_octets_received", },
        { 0x3010, T_REG_32, "good_frames_received", },
        { 0x3008, T_REG_32, "bad_octets_received", },
        { 0x3014, T_REG_32, "bad_frames_received", },
        { 0x3018, T_REG_32, "broadcast_frames_received", },
        { 0x301c, T_REG_32, "multicast_frames_received", },
        { 0x3050, T_REG_32, "unrec_mac_control_received", },
        { 0x3058, T_REG_32, "good_fc_received", },
        { 0x305c, T_REG_32, "bad_fc_received", },
        { 0x3060, T_REG_32, "undersize_received", },
        { 0x3064, T_REG_32, "fragments_received", },
        { 0x3068, T_REG_32, "oversize_received", },
        { 0x306c, T_REG_32, "jabber_received", },
        { 0x3070, T_REG_32, "mac_receive_error", },
        { 0x3074, T_REG_32, "bad_crc_event", },
        { 0x3078, T_REG_32, "collision", },
        { 0x307c, T_REG_32, "late_collision", },
        { 0x2484, T_REG_32, "rx_discard", },
        { 0x2488, T_REG_32, "rx_overrun", },
        { 0x3020, T_REG_32, "frames_64_octets", },
        { 0x3024, T_REG_32, "frames_65_to_127_octets", },
        { 0x3028, T_REG_32, "frames_128_to_255_octets", },
        { 0x302c, T_REG_32, "frames_256_to_511_octets", },
        { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
        { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
        { 0x3038, T_REG_64, "good_octets_sent", },
        { 0x3040, T_REG_32, "good_frames_sent", },
        { 0x3044, T_REG_32, "excessive_collision", },
        { 0x3048, T_REG_32, "multicast_frames_sent", },
        { 0x304c, T_REG_32, "broadcast_frames_sent", },
        { 0x3054, T_REG_32, "fc_sent", },
        { 0x300c, T_REG_32, "internal_mac_transmit_err", },
};
struct mvneta_pcpu_stats {
        struct u64_stats_sync syncp;
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
};

struct mvneta_pcpu_port {
        /* Pointer to the shared port */
        struct mvneta_port *pp;

        /* Pointer to the CPU-local NAPI struct */
        struct napi_struct napi;

        /* Cause of the previous interrupt */
        u32 cause_rx_tx;
};

struct mvneta_port {
        struct mvneta_pcpu_port __percpu *ports;
        struct mvneta_pcpu_stats __percpu *stats;

        int pkt_size;
        unsigned int frag_size;
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
        struct notifier_block cpu_notifier;

        /* Core clock */
        struct clk *clk;
        u8 mcast_count[256];

        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        phy_interface_t phy_interface;
        struct device_node *phy_node;

        unsigned int tx_csum_limit;
        int use_inband_status:1;

        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
                             MVNETA_TXD_L_DESC | \
                             MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
        u32 command;            /* Options used by HW for packet transmitting.*/
        u16 reserved1;          /* csum_l4 (for future use) */
        u16 data_size;          /* Data size of transmitted packet in bytes */
        u32 buf_phys_addr;      /* Physical addr of transmitted buffer */
        u32 reserved2;          /* hw_cmd - (for future use, PMT) */
        u32 reserved3[4];       /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u32 status;             /* Info about received packet */
        u16 reserved1;          /* pnc_info - (for future use, PnC) */
        u16 data_size;          /* Size of received packet in bytes */

        u32 buf_phys_addr;      /* Physical address of the buffer */
        u32 reserved2;          /* pnc_flow_id (for future use, PnC) */

        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
        u16 reserved3;          /* prefetch_cmd, for future use */
        u16 reserved4;          /* csum_l4 - (for future use, PnC) */

        u32 reserved5;          /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;          /* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
        u16 data_size;          /* Data size of transmitted packet in bytes */
        u16 reserved1;          /* csum_l4 (for future use) */
        u32 command;            /* Options used by HW for packet transmitting.*/
        u32 reserved2;          /* hw_cmd - (for future use, PMT) */
        u32 buf_phys_addr;      /* Physical addr of transmitted buffer */
        u32 reserved3[4];       /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u16 data_size;          /* Size of received packet in bytes */
        u16 reserved1;          /* pnc_info - (for future use, PnC) */
        u32 status;             /* Info about received packet */

        u32 reserved2;          /* pnc_flow_id (for future use, PnC) */
        u32 buf_phys_addr;      /* Physical address of the buffer */

        u16 reserved4;          /* csum_l4 - (for future use, PnC) */
        u16 reserved3;          /* prefetch_cmd, for future use */
        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */

        u32 reserved5;          /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;          /* hw_cmd (for future use, PnC and HWF) */
};
#endif
struct mvneta_tx_queue {
        /* Number of this TX queue, in the range 0-7 */
        u8 id;

        /* Number of TX DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used TX DMA descriptors in the
         * descriptor ring
         */
        int count;
        int tx_stop_threshold;
        int tx_wake_threshold;

        /* Array of transmitted skb */
        struct sk_buff **tx_skb;

        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;

        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;

        u32 done_pkts_coal;

        /* Virtual address of the TX DMA descriptors array */
        struct mvneta_tx_desc *descs;

        /* DMA address of the TX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last TX DMA descriptor */
        int last_desc;

        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;

        /* DMA buffers for TSO headers */
        char *tso_hdrs;

        /* DMA address of TSO headers */
        dma_addr_t tso_hdrs_phys;
};
struct mvneta_rx_queue {
        /* rx queue number, in the range 0-7 */
        u8 id;

        /* num of rx descriptors in the rx descriptor ring */
        int size;

        /* counter of times when mvneta_refill() failed */
        int missed;

        u32 pkts_coal;
        u32 time_coal;

        /* Virtual address of the RX DMA descriptors array */
        struct mvneta_rx_desc *descs;

        /* DMA address of the RX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last RX DMA descriptor */
        int last_desc;

        /* Index of the next RX DMA descriptor to process */
        int next_desc_to_proc;
};
/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
        writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
        return readl(pp->base + offset);
}
/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
        txq->txq_get_index++;
        if (txq->txq_get_index == txq->size)
                txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
        txq->txq_put_index++;
        if (txq->txq_put_index == txq->size)
                txq->txq_put_index = 0;
}
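/* Illustration (not part of the driver): txq_put_index is advanced by the
 * xmit path each time a descriptor is filled, while txq_get_index chases
 * it from the tx-done path. With size = 4, for example, the put index
 * walks 0 -> 1 -> 2 -> 3 -> 0, and the queue is fully drained once the
 * get index has made the same walk.
 */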
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
        int i;
        u32 dummy;

        /* Perform dummy reads from MIB counters */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
                dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
        dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
        dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
                                             struct rtnl_link_stats64 *stats)
{
        struct mvneta_port *pp = netdev_priv(dev);
        unsigned int start;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct mvneta_pcpu_stats *cpu_stats;
                u64 rx_packets;
                u64 rx_bytes;
                u64 tx_packets;
                u64 tx_bytes;

                cpu_stats = per_cpu_ptr(pp->stats, cpu);
                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        rx_packets = cpu_stats->rx_packets;
                        rx_bytes = cpu_stats->rx_bytes;
                        tx_packets = cpu_stats->tx_packets;
                        tx_bytes = cpu_stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
        }

        stats->rx_errors = dev->stats.rx_errors;
        stats->rx_dropped = dev->stats.rx_dropped;

        stats->tx_dropped = dev->stats.tx_dropped;

        return stats;
}
/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
                MVNETA_RXD_FIRST_LAST_DESC;
}
/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
                                          struct mvneta_rx_queue *rxq,
                                          int ndescs)
{
        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
         * be added at once
         */
        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
        }

        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
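/* Illustration (not part of the driver): the ADD_NON_OCCUPIED field is
 * only 8 bits wide, so making 600 descriptors available takes three
 * register writes: 255 + 255 in the loop above, then a final write
 * of 90 (255 + 255 + 90 = 600).
 */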
/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
                                       struct mvneta_rx_queue *rxq,
                                       int rx_done, int rx_filled)
{
        u32 val;

        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
                val = rx_done |
                      (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                return;
        }

        /* Only 255 descriptors can be added at once */
        while ((rx_done > 0) || (rx_filled > 0)) {
                if (rx_done <= 0xff) {
                        val = rx_done;
                        rx_done = 0;
                } else {
                        val = 0xff;
                        rx_done -= 0xff;
                }
                if (rx_filled <= 0xff) {
                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled = 0;
                } else {
                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled -= 0xff;
                }
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
        }
}
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
        int rx_desc = rxq->next_desc_to_proc;

        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
        prefetch(rxq->descs + rxq->next_desc_to_proc);
        return rxq->descs + rx_desc;
}
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
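/* Illustration (not part of the driver): the divide by 2 above suggests
 * the Max Rx Size field counts 2-byte units after the Marvell header,
 * so for max_rx_size = 1536 the value written is (1536 - 2) / 2 = 767,
 * placed at bit 2 by MVNETA_GMAC_MAX_RX_SIZE_SHIFT.
 */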
/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq,
                                  int offset)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

        /* Offset is programmed in units of 8 bytes */
        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int pend_desc)
{
        u32 val;

        /* Only 255 descriptors can be added at once; assume the caller
         * processes TX descriptors in quanta less than 256
         */
        val = pend_desc;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
        int tx_desc = txq->next_desc_to_proc;

        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
        return txq->descs + tx_desc;
}
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
        if (txq->next_desc_to_proc == 0)
                txq->next_desc_to_proc = txq->last_desc - 1;
        else
                txq->next_desc_to_proc--;
}
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq,
                                    int buf_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
        int queue;
        u32 q_map;

        /* Enable all initialized TXs. */
        q_map = 0;
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
                if (txq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

        /* Enable all initialized RXQs. */
        mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
}
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
        u32 val;
        int count;

        /* Stop Rx port activity. Check port Rx activity. */
        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

        /* Issue stop command for active channels only */
        if (val != 0)
                mvreg_write(pp, MVNETA_RXQ_CMD,
                            val << MVNETA_RXQ_DISABLE_SHIFT);

        /* Wait for all Rx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_RXQ_CMD);
        } while (val & 0xff);

        /* Stop Tx port activity. Check port Tx activity. Issue stop
         * command for active channels only
         */
        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

        if (val != 0)
                mvreg_write(pp, MVNETA_TXQ_CMD,
                            (val << MVNETA_TXQ_DISABLE_SHIFT));

        /* Wait for all Tx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for TX stopped status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                /* Check TX Command reg that all Txqs are stopped */
                val = mvreg_read(pp, MVNETA_TXQ_CMD);

        } while (val & 0xff);

        /* Double check to verify that TX FIFO is empty */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
                        netdev_warn(pp->dev,
                                    "TX FIFO empty timeout status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_PORT_STATUS);
        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
                 (val & MVNETA_TX_IN_PRGRS));
}
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
        u32 val;

        /* Enable port */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
        u32 val;

        /* Reset the Enable bit in the Serial Control Register */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

        udelay(200);
}
/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}
/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}
/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
                val = 0;
        } else {
                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
/* This method sets defaults to the NETA port:
 *      Clears interrupt Cause and Mask registers.
 *      Clears all MAC tables.
 *      Sets defaults to all registers.
 *      Resets RX and TX descriptor rings.
 *
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
        int cpu;
        int queue;
        u32 val;

        /* Clear all Cause registers */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

        /* Mask all interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

        /* Enable MBUS Retry bit16 */
        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

        /* Set CPU queue access map - all CPUs have access to all RX
         * queues and to all TX queues
         */
        for_each_present_cpu(cpu)
                mvreg_write(pp, MVNETA_CPU_MAP(cpu),
                            (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
                             MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

        /* Reset RX and TX DMAs */
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

        /* Disable Legacy WRR, Disable EJP, Release from reset */
        mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
        for (queue = 0; queue < txq_number; queue++) {
                mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
                mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
        }

        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

        /* Set Port Acceleration Mode */
        val = MVNETA_ACC_MODE_EXT;
        mvreg_write(pp, MVNETA_ACC_MODE, val);

        /* Update val of portCfg register accordingly with all RxQueue types */
        val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);

        val = 0;
        mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
        mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

        /* Build PORT_SDMA_CONFIG_REG */
        val = 0;

        /* Default burst size */
        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
        val |= MVNETA_DESC_SWAP;
#endif

        /* Assign port SDMA configuration */
        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

        /* Disable PHY polling in hardware, since we're using the
         * kernel phylib to do this.
         */
        val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
        val &= ~MVNETA_PHY_POLLING_ENABLE;
        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

        if (pp->use_inband_status) {
                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
                         MVNETA_GMAC_FORCE_LINK_DOWN |
                         MVNETA_GMAC_AN_FLOW_CTRL_EN);
                val |= MVNETA_GMAC_INBAND_AN_ENABLE |
                       MVNETA_GMAC_AN_SPEED_EN |
                       MVNETA_GMAC_AN_DUPLEX_EN;
                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
                val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
                val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
        } else {
                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
                         MVNETA_GMAC_AN_SPEED_EN |
                         MVNETA_GMAC_AN_DUPLEX_EN);
                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
        }

        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);

        /* Set port interrupt enable register - default enable all */
        mvreg_write(pp, MVNETA_INTR_ENABLE,
                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

        mvneta_mib_counters_clear(pp);
}
/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
        u32 val, size, mtu;
        int queue;

        mtu = max_tx_size * 8;
        if (mtu > MVNETA_TX_MTU_MAX)
                mtu = MVNETA_TX_MTU_MAX;

        /* Set MTU */
        val = mvreg_read(pp, MVNETA_TX_MTU);
        val &= ~MVNETA_TX_MTU_MAX;
        val |= mtu;
        mvreg_write(pp, MVNETA_TX_MTU, val);

        /* TX token size and all TXQs token size must be larger than the MTU */
        val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

        size = val & MVNETA_TX_TOKEN_SIZE_MAX;
        if (size < mtu) {
                size = mtu;
                val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
                val |= size;
                mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
        }
        for (queue = 0; queue < txq_number; queue++) {
                val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

                size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
                if (size < mtu) {
                        size = mtu;
                        val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
                        val |= size;
                        mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
                }
        }
}
/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
                                  int queue)
{
        unsigned int unicast_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        /* Locate the Unicast table entry */
        last_nibble = (0xf & last_nibble);

        /* offset from unicast tbl base */
        tbl_offset = (last_nibble / 4) * 4;

        /* offset within the above reg */
        reg_offset = last_nibble % 4;

        unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

        if (queue == -1) {
                /* Clear accepts frame bit at specified unicast DA tbl entry */
                unicast_reg &= ~(0xff << (8 * reg_offset));
        } else {
                unicast_reg &= ~(0xff << (8 * reg_offset));
                unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
        }

        mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
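/* Illustration (not part of the driver): for a MAC address ending in
 * 0x2a, last_nibble = 0xa, so the entry lives in the register at offset
 * (0xa / 4) * 4 = 8 from MVNETA_DA_FILT_UCAST_BASE, in byte lane
 * 0xa % 4 = 2; for queue 0 that byte becomes 0x01 (pass bit set,
 * queue bits zero).
 */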
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
                                int queue)
{
        unsigned int mac_h;
        unsigned int mac_l;

        if (queue != -1) {
                mac_l = (addr[4] << 8) | (addr[5]);
                mac_h = (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | (addr[3] << 0);

                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
        }

        /* Accept frames of this address */
        mvneta_set_ucast_addr(pp, addr[5], queue);
}
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
                    value | MVNETA_RXQ_NON_OCCUPIED(0));
        rxq->pkts_coal = value;
}
/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        u32 val;
        unsigned long clk_rate;

        clk_rate = clk_get_rate(pp->clk);
        val = (clk_rate / 1000000) * value;

        mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
        rxq->time_coal = value;
}
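/* Illustration (not part of the driver): the coalescing register counts
 * core-clock cycles, so with, say, a 250 MHz clock a 100 usec delay is
 * programmed as (250000000 / 1000000) * 100 = 25000 cycles. The 250 MHz
 * figure is only an example rate, not a datasheet value.
 */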
/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
                                         struct mvneta_tx_queue *txq, u32 value)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

        val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
        val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

        txq->done_pkts_coal = value;
}
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
                                u32 phys_addr, u32 cookie)
{
        rx_desc->buf_cookie = cookie;
        rx_desc->buf_phys_addr = phys_addr;
}
/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int sent_desc)
{
        u32 val;

        /* Only 255 TX descriptors can be updated at once */
        while (sent_desc > 0xff) {
                val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
                sent_desc = sent_desc - 0xff;
        }

        val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_tx_queue *txq)
{
        u32 val;
        int sent_desc;

        val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
        sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
                MVNETA_TXQ_SENT_DESC_SHIFT;

        return sent_desc;
}
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq)
{
        int sent_desc;

        /* Get number of sent descriptors */
        sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

        /* Decrement sent descriptors counter */
        if (sent_desc)
                mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

        return sent_desc;
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
                                int ip_hdr_len, int l4_proto)
{
        u32 command;

        /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
         * G_L4_chk, L4_type; required only for checksum
         * calculation
         */
        command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

        if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;

        if (l4_proto == IPPROTO_TCP)
                command |= MVNETA_TX_L4_CSUM_FULL;
        else if (l4_proto == IPPROTO_UDP)
                command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
        else
                command |= MVNETA_TX_L4_CSUM_NOT;

        return command;
}
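/* Illustration (not part of the driver): for an IPv4/TCP frame with the
 * IP header at offset 14 and a 20-byte IP header (ihl = 5, as passed by
 * mvneta_skb_tx_csum() below), the command word is
 *
 *   (14 << 0) | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL
 */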
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc)
{
        u32 status = rx_desc->status;

        if (!mvneta_rxq_desc_is_first_last(status)) {
                netdev_err(pp->dev,
                           "bad rx status %08x (buffer oversize), size=%d\n",
                           status, rx_desc->data_size);
                return;
        }

        switch (status & MVNETA_RXD_ERR_CODE_MASK) {
        case MVNETA_RXD_ERR_CRC:
                netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_OVERRUN:
                netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_LEN:
                netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_RESOURCE:
                netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        }
}
1345 static void mvneta_rx_csum(struct mvneta_port
*pp
, u32 status
,
1346 struct sk_buff
*skb
)
1348 if ((status
& MVNETA_RXD_L3_IP4
) &&
1349 (status
& MVNETA_RXD_L4_CSUM_OK
)) {
1351 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1355 skb
->ip_summed
= CHECKSUM_NONE
;
/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
                                                     u32 cause)
{
        int queue = fls(cause) - 1;

        return &pp->txqs[queue];
}
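/* Illustration (not part of the driver): for cause = 0x05 (TXQs 0 and 2
 * pending), fls(0x05) - 1 = 2, so TXQ 2 is serviced first;
 * mvneta_tx_done_gbe() then clears bit 2 and loops, picking TXQ 0 next.
 */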
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                                 struct mvneta_tx_queue *txq, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                struct mvneta_tx_desc *tx_desc = txq->descs +
                        txq->txq_get_index;
                struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

                mvneta_txq_inc_get(txq);

                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
                        dma_unmap_single(pp->dev->dev.parent,
                                         tx_desc->buf_phys_addr,
                                         tx_desc->data_size, DMA_TO_DEVICE);
                if (!skb)
                        continue;
                dev_kfree_skb_any(skb);
        }
}
/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
                            struct mvneta_tx_queue *txq)
{
        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
        int tx_done;

        tx_done = mvneta_txq_sent_desc_proc(pp, txq);
        if (!tx_done)
                return;

        mvneta_txq_bufs_free(pp, txq, tx_done);

        txq->count -= tx_done;

        if (netif_tx_queue_stopped(nq)) {
                if (txq->count <= txq->tx_wake_threshold)
                        netif_tx_wake_queue(nq);
        }
}
static void *mvneta_frag_alloc(const struct mvneta_port *pp)
{
        if (likely(pp->frag_size <= PAGE_SIZE))
                return netdev_alloc_frag(pp->frag_size);
        else
                return kmalloc(pp->frag_size, GFP_ATOMIC);
}

static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
        if (likely(pp->frag_size <= PAGE_SIZE))
                skb_free_frag(data);
        else
                kfree(data);
}
/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc)
{
        dma_addr_t phys_addr;
        void *data;

        data = mvneta_frag_alloc(pp);
        if (!data)
                return -ENOMEM;

        phys_addr = dma_map_single(pp->dev->dev.parent, data,
                                   MVNETA_RX_BUF_SIZE(pp->pkt_size),
                                   DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
                mvneta_frag_free(pp, data);
                return -ENOMEM;
        }

        mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
        return 0;
}
/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
                __be16 l3_proto = vlan_get_protocol(skb);
                u8 l4_proto;

                if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);

                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
                } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);

                        /* Read l4_protocol from one of IPv6 extra headers */
                        if (skb_network_header_len(skb) > 0)
                                ip_hdr_len = (skb_network_header_len(skb) >> 2);
                        l4_proto = ip6h->nexthdr;
                } else
                        return MVNETA_TX_L4_CSUM_NOT;

                return mvneta_txq_desc_csum(skb_network_offset(skb),
                                            l3_proto, ip_hdr_len, l4_proto);
        }

        return MVNETA_TX_L4_CSUM_NOT;
}
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
                                 struct mvneta_rx_queue *rxq)
{
        int rx_done, i;

        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
        for (i = 0; i < rxq->size; i++) {
                struct mvneta_rx_desc *rx_desc = rxq->descs + i;
                void *data = (void *)rx_desc->buf_cookie;

                mvneta_frag_free(pp, data);
                dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
                                 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
        }

        if (rx_done)
                mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}
/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                     struct mvneta_rx_queue *rxq)
{
        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
        struct net_device *dev = pp->dev;
        int rx_done;
        u32 rcvd_pkts = 0;
        u32 rcvd_bytes = 0;

        /* Get number of received packets */
        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

        if (rx_todo > rx_done)
                rx_todo = rx_done;

        rx_done = 0;

        /* Fairness NAPI loop */
        while (rx_done < rx_todo) {
                struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
                struct sk_buff *skb;
                unsigned char *data;
                dma_addr_t phys_addr;
                u32 rx_status;
                int rx_bytes, err;

                rx_done++;
                rx_status = rx_desc->status;
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                data = (unsigned char *)rx_desc->buf_cookie;
                phys_addr = rx_desc->buf_phys_addr;

                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
                err_drop_frame:
                        dev->stats.rx_errors++;
                        mvneta_rx_error(pp, rx_desc);
                        /* leave the descriptor untouched */
                        continue;
                }

                if (rx_bytes <= rx_copybreak) {
                        /* better copy a small frame and not unmap the DMA region */
                        skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
                        if (unlikely(!skb))
                                goto err_drop_frame;

                        dma_sync_single_range_for_cpu(dev->dev.parent,
                                                      rx_desc->buf_phys_addr,
                                                      MVNETA_MH_SIZE + NET_SKB_PAD,
                                                      rx_bytes,
                                                      DMA_FROM_DEVICE);
                        memcpy(skb_put(skb, rx_bytes),
                               data + MVNETA_MH_SIZE + NET_SKB_PAD,
                               rx_bytes);

                        skb->protocol = eth_type_trans(skb, dev);
                        mvneta_rx_csum(pp, rx_status, skb);
                        napi_gro_receive(&port->napi, skb);

                        rcvd_pkts++;
                        rcvd_bytes += rx_bytes;

                        /* leave the descriptor and buffer untouched */
                        continue;
                }

                /* Refill processing */
                err = mvneta_rx_refill(pp, rx_desc);
                if (err) {
                        netdev_err(dev, "Linux processing - Can't refill\n");
                        rxq->missed++;
                        goto err_drop_frame;
                }

                skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
                if (!skb)
                        goto err_drop_frame;

                dma_unmap_single(dev->dev.parent, phys_addr,
                                 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

                rcvd_pkts++;
                rcvd_bytes += rx_bytes;

                /* Linux processing */
                skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
                skb_put(skb, rx_bytes);

                skb->protocol = eth_type_trans(skb, dev);

                mvneta_rx_csum(pp, rx_status, skb);

                napi_gro_receive(&port->napi, skb);
        }

        if (rcvd_pkts) {
                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

                u64_stats_update_begin(&stats->syncp);
                stats->rx_packets += rcvd_pkts;
                stats->rx_bytes += rcvd_bytes;
                u64_stats_update_end(&stats->syncp);
        }

        /* Update rxq management counters */
        mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

        return rx_done;
}
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
                   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
        struct mvneta_tx_desc *tx_desc;
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

        txq->tx_skb[txq->txq_put_index] = NULL;
        tx_desc = mvneta_txq_next_desc_get(txq);
        tx_desc->data_size = hdr_len;
        tx_desc->command = mvneta_skb_tx_csum(pp, skb);
        tx_desc->command |= MVNETA_TXD_F_DESC;
        tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
                                 txq->txq_put_index * TSO_HEADER_SIZE;
        mvneta_txq_inc_put(txq);
}
static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
                    struct sk_buff *skb, char *data, int size,
                    bool last_tcp, bool is_last)
{
        struct mvneta_tx_desc *tx_desc;

        tx_desc = mvneta_txq_next_desc_get(txq);
        tx_desc->data_size = size;
        tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
                                                size, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dev.parent,
                     tx_desc->buf_phys_addr))) {
                mvneta_txq_desc_put(txq);
                return -ENOMEM;
        }

        tx_desc->command = 0;
        txq->tx_skb[txq->txq_put_index] = NULL;

        if (last_tcp) {
                /* last descriptor in the TCP packet */
                tx_desc->command = MVNETA_TXD_L_DESC;

                /* last descriptor in SKB */
                if (is_last)
                        txq->tx_skb[txq->txq_put_index] = skb;
        }
        mvneta_txq_inc_put(txq);
        return 0;
}
static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
                         struct mvneta_tx_queue *txq)
{
        int total_len, data_left;
        int desc_count = 0;
        struct mvneta_port *pp = netdev_priv(dev);
        struct tso_t tso;
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int i;

        /* Count needed descriptors */
        if ((txq->count + tso_count_descs(skb)) >= txq->size)
                return 0;

        if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
                pr_info("*** Is this even possible???!?!?\n");
                return 0;
        }

        /* Initialize the TSO handler, and prepare the first payload */
        tso_start(skb, &tso);

        total_len = skb->len - hdr_len;
        while (total_len > 0) {
                char *hdr;

                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
                total_len -= data_left;
                desc_count++;

                /* prepare packet headers: MAC + IP + TCP */
                hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

                mvneta_tso_put_hdr(skb, pp, txq);

                while (data_left > 0) {
                        int size;
                        desc_count++;

                        size = min_t(int, tso.size, data_left);

                        if (mvneta_tso_put_data(dev, txq, skb,
                                                tso.data, size,
                                                size == data_left,
                                                total_len == 0))
                                goto err_release;
                        data_left -= size;

                        tso_build_data(skb, &tso, size);
                }
        }

        return desc_count;

err_release:
        /* Release all used data descriptors; header descriptors must not
         * be DMA-unmapped.
         */
        for (i = desc_count - 1; i >= 0; i--) {
                struct mvneta_tx_desc *tx_desc = txq->descs + i;
                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
                        dma_unmap_single(pp->dev->dev.parent,
                                         tx_desc->buf_phys_addr,
                                         tx_desc->data_size,
                                         DMA_TO_DEVICE);
                mvneta_txq_desc_put(txq);
        }
        return 0;
}
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
                                  struct mvneta_tx_queue *txq)
{
        struct mvneta_tx_desc *tx_desc;
        int i, nr_frags = skb_shinfo(skb)->nr_frags;

        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                void *addr = page_address(frag->page.p) + frag->page_offset;

                tx_desc = mvneta_txq_next_desc_get(txq);
                tx_desc->data_size = frag->size;

                tx_desc->buf_phys_addr =
                        dma_map_single(pp->dev->dev.parent, addr,
                                       tx_desc->data_size, DMA_TO_DEVICE);

                if (dma_mapping_error(pp->dev->dev.parent,
                                      tx_desc->buf_phys_addr)) {
                        mvneta_txq_desc_put(txq);
                        goto error;
                }

                if (i == nr_frags - 1) {
                        /* Last descriptor */
                        tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
                        txq->tx_skb[txq->txq_put_index] = skb;
                } else {
                        /* Descriptor in the middle: Not First, Not Last */
                        tx_desc->command = 0;
                        txq->tx_skb[txq->txq_put_index] = NULL;
                }
                mvneta_txq_inc_put(txq);
        }

        return 0;

error:
        /* Release all descriptors that were used to map fragments of
         * this packet, as well as the corresponding DMA mappings
         */
        for (i = i - 1; i >= 0; i--) {
                tx_desc = txq->descs + i;
                dma_unmap_single(pp->dev->dev.parent,
                                 tx_desc->buf_phys_addr,
                                 tx_desc->data_size,
                                 DMA_TO_DEVICE);
                mvneta_txq_desc_put(txq);
        }

        return -ENOMEM;
}
/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);
        u16 txq_id = skb_get_queue_mapping(skb);
        struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
        int len = skb->len;
        int frags = 0;
        u32 tx_cmd;

        if (!netif_running(dev))
                goto out;

        if (skb_is_gso(skb)) {
                frags = mvneta_tx_tso(skb, dev, txq);
                goto out;
        }

        frags = skb_shinfo(skb)->nr_frags + 1;

        /* Get a descriptor for the first part of the packet */
        tx_desc = mvneta_txq_next_desc_get(txq);

        tx_cmd = mvneta_skb_tx_csum(pp, skb);

        tx_desc->data_size = skb_headlen(skb);

        tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
                                                tx_desc->data_size,
                                                DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dev.parent,
                                       tx_desc->buf_phys_addr))) {
                mvneta_txq_desc_put(txq);
                frags = 0;
                goto out;
        }

        if (frags == 1) {
                /* First and Last descriptor */
                tx_cmd |= MVNETA_TXD_FLZ_DESC;
                tx_desc->command = tx_cmd;
                txq->tx_skb[txq->txq_put_index] = skb;
                mvneta_txq_inc_put(txq);
        } else {
                /* First but not Last */
                tx_cmd |= MVNETA_TXD_F_DESC;
                txq->tx_skb[txq->txq_put_index] = NULL;
                mvneta_txq_inc_put(txq);
                tx_desc->command = tx_cmd;
                /* Continue with other skb fragments */
                if (mvneta_tx_frag_process(pp, skb, txq)) {
                        dma_unmap_single(dev->dev.parent,
                                         tx_desc->buf_phys_addr,
                                         tx_desc->data_size,
                                         DMA_TO_DEVICE);
                        mvneta_txq_desc_put(txq);
                        frags = 0;
                        goto out;
                }
        }

out:
        if (frags > 0) {
                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
                struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

                txq->count += frags;
                mvneta_txq_pend_desc_add(pp, txq, frags);

                if (txq->count >= txq->tx_stop_threshold)
                        netif_tx_stop_queue(nq);

                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes += len;
                u64_stats_update_end(&stats->syncp);
        } else {
                dev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
        }

        return NETDEV_TX_OK;
}
/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
                                  struct mvneta_tx_queue *txq)
{
        int tx_done = txq->count;

        mvneta_txq_bufs_free(pp, txq, tx_done);

        /* reset txq */
        txq->count = 0;
        txq->txq_put_index = 0;
        txq->txq_get_index = 0;
}
/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
        struct mvneta_tx_queue *txq;
        struct netdev_queue *nq;

        while (cause_tx_done) {
                txq = mvneta_tx_done_policy(pp, cause_tx_done);

                nq = netdev_get_tx_queue(pp->dev, txq->id);
                __netif_tx_lock(nq, smp_processor_id());

                if (txq->count)
                        mvneta_txq_done(pp, txq);

                __netif_tx_unlock(nq);
                cause_tx_done &= ~((1 << txq->id));
        }
}
/* Compute crc8 of the specified address, using a unique algorithm,
 * according to hw spec, different than generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
        int crc = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                int j;

                crc = (crc ^ addr[i]) << 8;
                for (j = 7; j >= 0; j--) {
                        if (crc & (0x100 << j))
                                crc = crc ^ (0x107 << j);
                }
        }

        return crc;
}
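/* Illustration (not part of the driver): the XOR constant 0x107 above
 * corresponds to the CRC-8 polynomial x^8 + x^2 + x + 1 (including the
 * leading term), applied MSB-first with no final XOR; the 8-bit result
 * indexes the 256-entry mcast_count[] array and the Other Multicast
 * table below.
 */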
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
                                          unsigned char last_byte,
                                          int queue)
{
        unsigned int smc_table_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        /* Register offset from SMC table base */
        tbl_offset = (last_byte / 4);
        /* Entry offset within the above reg */
        reg_offset = last_byte % 4;

        smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
                                        + tbl_offset * 4));

        if (queue == -1) {
                smc_table_reg &= ~(0xff << (8 * reg_offset));
        } else {
                smc_table_reg &= ~(0xff << (8 * reg_offset));
                smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
        }

        mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
                    smc_table_reg);
}
/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
                                        unsigned char crc8,
                                        int queue)
{
        unsigned int omc_table_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
        reg_offset = crc8 % 4; /* Entry offset within the above reg */

        omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

        if (queue == -1) {
                /* Clear accepts frame bit at specified Other DA table entry */
                omc_table_reg &= ~(0xff << (8 * reg_offset));
        } else {
                omc_table_reg &= ~(0xff << (8 * reg_offset));
                omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
        }

        mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
                                 int queue)
{
        unsigned char crc_result = 0;

        if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
                mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
                return 0;
        }

        crc_result = mvneta_addr_crc(p_addr);
        if (queue == -1) {
                if (pp->mcast_count[crc_result] == 0) {
                        netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
                                    crc_result);
                        return -EINVAL;
                }

                pp->mcast_count[crc_result]--;
                if (pp->mcast_count[crc_result] != 0) {
                        netdev_info(pp->dev,
                                    "After delete there are %d valid Mcast for crc8=0x%02x\n",
                                    pp->mcast_count[crc_result], crc_result);
                        return -EINVAL;
                }
        } else
                pp->mcast_count[crc_result]++;

        mvneta_set_other_mcast_addr(pp, crc_result, queue);

        return 0;
}
/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
                                          int is_promisc)
{
        u32 port_cfg_reg, val;

        port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

        val = mvreg_read(pp, MVNETA_TYPE_PRIO);

        /* Set / Clear UPM bit in port configuration register */
        if (is_promisc) {
                /* Accept all Unicast addresses */
                port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
                val |= MVNETA_FORCE_UNI;
                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
        } else {
                /* Reject all Unicast addresses */
                port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
                val &= ~MVNETA_FORCE_UNI;
        }

        mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
        mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}
/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_PROMISC) {
                /* Accept all: Multicast + Unicast */
                mvneta_rx_unicast_promisc_set(pp, 1);
                mvneta_set_ucast_table(pp, rxq_def);
                mvneta_set_special_mcast_table(pp, rxq_def);
                mvneta_set_other_mcast_table(pp, rxq_def);
        } else {
                /* Accept single Unicast */
                mvneta_rx_unicast_promisc_set(pp, 0);
                mvneta_set_ucast_table(pp, -1);
                mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

                if (dev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast */
                        mvneta_set_special_mcast_table(pp, rxq_def);
                        mvneta_set_other_mcast_table(pp, rxq_def);
                } else {
                        /* Accept only initialized multicast */
                        mvneta_set_special_mcast_table(pp, -1);
                        mvneta_set_other_mcast_table(pp, -1);

                        if (!netdev_mc_empty(dev)) {
                                netdev_for_each_mc_addr(ha, dev) {
                                        mvneta_mcast_addr_set(pp, ha->addr,
                                                              rxq_def);
                                }
                        }
                }
        }
}
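/* Illustration only (not from the original source): the precedence
 * implemented above, from most to least permissive. IFF_PROMISC wins,
 * then IFF_ALLMULTI, then per-address filters:
 *
 *      if (dev->flags & IFF_PROMISC)        // accept everything
 *      else if (dev->flags & IFF_ALLMULTI)  // own MAC + all multicast
 *      else                                 // own MAC + listed multicast
 */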
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
        struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

        disable_percpu_irq(port->pp->dev->irq);
        napi_schedule(&port->napi);

        return IRQ_HANDLED;
}
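/* Illustration only (not from the original source): this is the
 * classic NAPI top half. The hard interrupt is masked (here by
 * disabling the per-CPU IRQ) and all real work is deferred to the
 * poller, which re-enables the IRQ once it consumes less than its
 * budget:
 *
 *      hard irq  -> mvneta_isr()  -> napi_schedule()
 *      softirq   -> mvneta_poll() -> enable_percpu_irq()
 */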
static int mvneta_fixed_link_update(struct mvneta_port *pp,
                                    struct phy_device *phy)
{
        struct fixed_phy_status status;
        struct fixed_phy_status changed = {};
        u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

        status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
        if (gmac_stat & MVNETA_GMAC_SPEED_1000)
                status.speed = SPEED_1000;
        else if (gmac_stat & MVNETA_GMAC_SPEED_100)
                status.speed = SPEED_100;
        else
                status.speed = SPEED_10;
        status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
        changed.link = 1;
        changed.speed = 1;
        changed.duplex = 1;
        fixed_phy_update_state(phy, &status, &changed);
        return 0;
}
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
        int rx_done = 0;
        u32 cause_rx_tx;
        struct mvneta_port *pp = netdev_priv(napi->dev);
        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

        if (!netif_running(pp->dev)) {
                napi_complete(&port->napi);
                return rx_done;
        }

        /* Read cause register */
        cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
        if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
                u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

                mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
                if (pp->use_inband_status && (cause_misc &
                                (MVNETA_CAUSE_PHY_STATUS_CHANGE |
                                 MVNETA_CAUSE_LINK_CHANGE |
                                 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
                        mvneta_fixed_link_update(pp, pp->phy_dev);
                }
        }

        /* Release Tx descriptors */
        if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
                mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
                cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
        }

        /* For the case where the last mvneta_poll did not process all
         * RX packets
         */
        cause_rx_tx |= port->cause_rx_tx;
        rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
        budget -= rx_done;

        if (budget > 0) {
                cause_rx_tx = 0;
                napi_complete(&port->napi);
                enable_percpu_irq(pp->dev->irq, 0);
        }

        port->cause_rx_tx = cause_rx_tx;
        return rx_done;
}
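/* Illustration only (not from the original source): the NAPI budget
 * contract used above. If the poller processed fewer packets than its
 * budget, the queue is drained: complete NAPI and unmask the
 * interrupt; otherwise return the full count and get rescheduled:
 *
 *      rx_done = mvneta_rx(pp, budget, rxq);
 *      if (rx_done < budget) {
 *              napi_complete(&port->napi);
 *              enable_percpu_irq(pp->dev->irq, 0);
 *      }
 *      return rx_done;
 */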
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                           int num)
{
        int i;

        for (i = 0; i < num; i++) {
                memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
                if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
                        netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
                                   __func__, rxq->id, i, num);
                        break;
                }
        }

        /* Add this number of RX descriptors as non occupied (ready to
         * get packets)
         */
        mvneta_rxq_non_occup_desc_add(pp, rxq, i);

        return i;
}
/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
        int queue;

        /* free the skb's in the tx ring */
        for (queue = 0; queue < txq_number; queue++)
                mvneta_txq_done_force(pp, &pp->txqs[queue]);

        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
                           struct mvneta_rx_queue *rxq)
{
        rxq->size = pp->rx_ring_size;

        /* Allocate memory for RX descriptors */
        rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                        rxq->size * MVNETA_DESC_ALIGNED_SIZE,
                                        &rxq->descs_phys, GFP_KERNEL);
        if (rxq->descs == NULL)
                return -ENOMEM;

        BUG_ON(rxq->descs !=
               PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

        rxq->last_desc = rxq->size - 1;

        /* Set Rx descriptors queue starting address */
        mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

        /* Set Offset */
        mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

        /* Set coalescing pkts and time */
        mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
        mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

        /* Fill RXQ with buffers from RX pool */
        mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
        mvneta_rxq_bm_disable(pp, rxq);
        mvneta_rxq_fill(pp, rxq, rxq->size);

        return 0;
}
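/* Illustration only (not from the original source): descriptor rings
 * live in coherent DMA memory so the NIC and the CPU share them
 * without explicit syncs. The matching free must pass the same size
 * and the dma_addr_t handed back at allocation time:
 *
 *      descs = dma_alloc_coherent(dev, size * MVNETA_DESC_ALIGNED_SIZE,
 *                                 &descs_phys, GFP_KERNEL);
 *      ...
 *      dma_free_coherent(dev, size * MVNETA_DESC_ALIGNED_SIZE,
 *                        descs, descs_phys);
 */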
/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
                              struct mvneta_rx_queue *rxq)
{
        mvneta_rxq_drop_pkts(pp, rxq);

        if (rxq->descs)
                dma_free_coherent(pp->dev->dev.parent,
                                  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
                                  rxq->descs, rxq->descs_phys);

        rxq->descs = NULL;
        rxq->last_desc = 0;
        rxq->next_desc_to_proc = 0;
        rxq->descs_phys = 0;
}
/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
                           struct mvneta_tx_queue *txq)
{
        txq->size = pp->tx_ring_size;

        /* A queue must always have room for at least one skb.
         * Therefore, stop the queue when the free entries reaches
         * the maximum number of descriptors per skb.
         */
        txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
        txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

        /* Allocate memory for TX descriptors */
        txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                        txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                        &txq->descs_phys, GFP_KERNEL);
        if (txq->descs == NULL)
                return -ENOMEM;

        /* Make sure descriptor address is cache line size aligned */
        BUG_ON(txq->descs !=
               PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

        txq->last_desc = txq->size - 1;

        /* Set maximum bandwidth for enabled TXQs */
        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

        /* Set Tx descriptors queue starting address */
        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

        txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
        if (txq->tx_skb == NULL) {
                dma_free_coherent(pp->dev->dev.parent,
                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                  txq->descs, txq->descs_phys);
                return -ENOMEM;
        }

        /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
        txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
                                           txq->size * TSO_HEADER_SIZE,
                                           &txq->tso_hdrs_phys, GFP_KERNEL);
        if (txq->tso_hdrs == NULL) {
                kfree(txq->tx_skb);
                dma_free_coherent(pp->dev->dev.parent,
                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                  txq->descs, txq->descs_phys);
                return -ENOMEM;
        }
        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

        return 0;
}
/* Free allocated resources when mvneta_txq_init() fails to allocate memory
 * or when tearing down the port.
 */
static void mvneta_txq_deinit(struct mvneta_port *pp,
                              struct mvneta_tx_queue *txq)
{
        kfree(txq->tx_skb);

        if (txq->tso_hdrs)
                dma_free_coherent(pp->dev->dev.parent,
                                  txq->size * TSO_HEADER_SIZE,
                                  txq->tso_hdrs, txq->tso_hdrs_phys);
        if (txq->descs)
                dma_free_coherent(pp->dev->dev.parent,
                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                  txq->descs, txq->descs_phys);

        txq->descs = NULL;
        txq->last_desc = 0;
        txq->next_desc_to_proc = 0;
        txq->descs_phys = 0;

        /* Set minimum bandwidth for disabled TXQs */
        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

        /* Set Tx descriptors queue starting address and size */
        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
        int queue;

        for (queue = 0; queue < txq_number; queue++)
                mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
        mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
}
/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
        int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);

        if (err) {
                netdev_err(pp->dev, "%s: can't create rxq=%d\n",
                           __func__, rxq_def);
                mvneta_cleanup_rxqs(pp);
                return err;
        }

        return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
        int queue;

        for (queue = 0; queue < txq_number; queue++) {
                int err = mvneta_txq_init(pp, &pp->txqs[queue]);

                if (err) {
                        netdev_err(pp->dev, "%s: can't create txq=%d\n",
                                   __func__, queue);
                        mvneta_cleanup_txqs(pp);
                        return err;
                }
        }

        return 0;
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
        int cpu;

        mvneta_max_rx_size_set(pp, pp->pkt_size);
        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

        /* start the Rx/Tx activity */
        mvneta_port_enable(pp);

        /* Enable polling on the port */
        for_each_present_cpu(cpu) {
                struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

                napi_enable(&port->napi);
        }

        /* Unmask interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                    MVNETA_RX_INTR_MASK(rxq_number) |
                    MVNETA_TX_INTR_MASK(txq_number) |
                    MVNETA_MISCINTR_INTR_MASK);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
                    MVNETA_CAUSE_LINK_CHANGE |
                    MVNETA_CAUSE_PSC_SYNC_CHANGE);

        phy_start(pp->phy_dev);
        netif_tx_start_all_queues(pp->dev);
}
static void mvneta_stop_dev(struct mvneta_port *pp)
{
        int cpu;

        phy_stop(pp->phy_dev);

        for_each_present_cpu(cpu) {
                struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

                napi_disable(&port->napi);
        }

        netif_carrier_off(pp->dev);

        mvneta_port_down(pp);
        netif_tx_stop_all_queues(pp->dev);

        /* Stop the port activity */
        mvneta_port_disable(pp);

        /* Clear all ethernet port interrupts */
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

        /* Mask all ethernet port interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

        mvneta_tx_reset(pp);
        mvneta_rx_reset(pp);
}
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
        if (mtu < 68) {
                netdev_err(dev, "cannot change mtu to less than 68\n");
                return -EINVAL;
        }

        /* 9676 == 9700 - 20 and rounding to 8 */
        if (mtu > 9676) {
                netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
                mtu = 9676;
        }

        if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
                netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
                            mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
                mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
        }

        return mtu;
}
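/* Illustration only (not from the original source): the 9676 cap
 * restated. MVNETA_RX_PKT_SIZE() adds roughly 20 bytes of Marvell
 * header / Ethernet header / FCS overhead on top of the MTU, and the
 * resulting frame must stay 8-byte aligned within the 9700-byte
 * hardware limit:
 *
 *      9676 + 20 = 9696, a multiple of 8 and <= 9700
 *      9680 + 20 = 9700, fits but is not a multiple of 8
 */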
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int ret;

        mtu = mvneta_check_mtu_valid(dev, mtu);
        if (mtu < 0)
                return -EINVAL;

        dev->mtu = mtu;

        if (!netif_running(dev)) {
                netdev_update_features(dev);
                return 0;
        }

        /* The interface is running, so we have to force a
         * reallocation of the queues
         */
        mvneta_stop_dev(pp);

        mvneta_cleanup_txqs(pp);
        mvneta_cleanup_rxqs(pp);

        pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        ret = mvneta_setup_rxqs(pp);
        if (ret) {
                netdev_err(dev, "unable to setup rxqs after MTU change\n");
                return ret;
        }

        ret = mvneta_setup_txqs(pp);
        if (ret) {
                netdev_err(dev, "unable to setup txqs after MTU change\n");
                return ret;
        }

        mvneta_start_dev(pp);
        mvneta_port_up(pp);

        netdev_update_features(dev);

        return 0;
}
static netdev_features_t mvneta_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
                features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
                netdev_info(dev,
                            "Disable IP checksum for MTU greater than %dB\n",
                            pp->tx_csum_limit);
        }

        return features;
}
/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
        u32 mac_addr_l, mac_addr_h;

        mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
        mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
        addr[0] = (mac_addr_h >> 24) & 0xFF;
        addr[1] = (mac_addr_h >> 16) & 0xFF;
        addr[2] = (mac_addr_h >> 8) & 0xFF;
        addr[3] = mac_addr_h & 0xFF;
        addr[4] = (mac_addr_l >> 8) & 0xFF;
        addr[5] = mac_addr_l & 0xFF;
}
/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
        struct mvneta_port *pp = netdev_priv(dev);
        struct sockaddr *sockaddr = addr;
        int ret;

        ret = eth_prepare_mac_addr_change(dev, addr);
        if (ret < 0)
                return ret;
        /* Remove previous address table entry */
        mvneta_mac_addr_set(pp, dev->dev_addr, -1);

        /* Set new addr in hw */
        mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);

        eth_commit_mac_addr_change(dev, addr);
        return 0;
}
static void mvneta_adjust_link(struct net_device *ndev)
{
        struct mvneta_port *pp = netdev_priv(ndev);
        struct phy_device *phydev = pp->phy_dev;
        int status_change = 0;

        if (phydev->link) {
                if ((pp->speed != phydev->speed) ||
                    (pp->duplex != phydev->duplex)) {
                        u32 val;

                        val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                        val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
                                 MVNETA_GMAC_CONFIG_GMII_SPEED |
                                 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

                        if (phydev->duplex)
                                val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

                        if (phydev->speed == SPEED_1000)
                                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
                        else if (phydev->speed == SPEED_100)
                                val |= MVNETA_GMAC_CONFIG_MII_SPEED;

                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

                        pp->duplex = phydev->duplex;
                        pp->speed = phydev->speed;
                }
        }

        if (phydev->link != pp->link) {
                if (!phydev->link) {
                        pp->duplex = -1;
                        pp->speed = 0;
                }

                pp->link = phydev->link;
                status_change = 1;
        }

        if (status_change) {
                if (phydev->link) {
                        if (!pp->use_inband_status) {
                                u32 val = mvreg_read(pp,
                                                  MVNETA_GMAC_AUTONEG_CONFIG);
                                val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
                                val |= MVNETA_GMAC_FORCE_LINK_PASS;
                                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
                                            val);
                        }
                        mvneta_port_up(pp);
                } else {
                        if (!pp->use_inband_status) {
                                u32 val = mvreg_read(pp,
                                                  MVNETA_GMAC_AUTONEG_CONFIG);
                                val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
                                val |= MVNETA_GMAC_FORCE_LINK_DOWN;
                                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
                                            val);
                        }
                        mvneta_port_down(pp);
                }
                phy_print_status(phydev);
        }
}
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
        struct phy_device *phy_dev;

        phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
                                 pp->phy_interface);
        if (!phy_dev) {
                netdev_err(pp->dev, "could not find the PHY\n");
                return -ENODEV;
        }

        phy_dev->supported &= PHY_GBIT_FEATURES;
        phy_dev->advertising = phy_dev->supported;

        pp->phy_dev = phy_dev;
        pp->link = 0;
        pp->duplex = 0;
        pp->speed = 0;

        return 0;
}
static void mvneta_mdio_remove(struct mvneta_port *pp)
{
        phy_disconnect(pp->phy_dev);
        pp->phy_dev = NULL;
}

static void mvneta_percpu_enable(void *arg)
{
        struct mvneta_port *pp = arg;

        enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
        struct mvneta_port *pp = arg;

        disable_percpu_irq(pp->dev->irq);
}
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
        int online_cpu_idx, cpu, i = 0;

        online_cpu_idx = rxq_def % num_online_cpus();

        for_each_online_cpu(cpu) {
                if (i == online_cpu_idx)
                        /* Enable per-CPU interrupt on the one CPU we
                         * care about
                         */
                        smp_call_function_single(cpu, mvneta_percpu_enable,
                                                 pp, true);
                else
                        /* Disable per-CPU interrupt on all the other CPU */
                        smp_call_function_single(cpu, mvneta_percpu_disable,
                                                 pp, true);
                i++;
        }
}
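/* Illustration only (not from the original source): with a single RX
 * queue in use, exactly one CPU owns the RX interrupt at a time. The
 * election simply maps the default queue onto the set of online CPUs:
 *
 *      online_cpu_idx = rxq_def % num_online_cpus();
 *
 * e.g. rxq_def = 0 on a 2-CPU system elects the first online CPU, and
 * a hotplug event re-runs the election (see the notifier below).
 */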
static int mvneta_percpu_notifier(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
                                              cpu_notifier);
        int cpu = (unsigned long)hcpu, other_cpu;
        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                netif_tx_stop_all_queues(pp->dev);

                /* We have to synchronise on the napi of each CPU
                 * except the one just being woken up
                 */
                for_each_online_cpu(other_cpu) {
                        if (other_cpu != cpu) {
                                struct mvneta_pcpu_port *other_port =
                                        per_cpu_ptr(pp->ports, other_cpu);

                                napi_synchronize(&other_port->napi);
                        }
                }

                /* Mask all ethernet port interrupts */
                mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
                mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
                mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
                napi_enable(&port->napi);

                /* Enable per-CPU interrupt on the one CPU we care
                 * about.
                 */
                mvneta_percpu_elect(pp);

                /* Unmask all ethernet port interrupts */
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                            MVNETA_RX_INTR_MASK(rxq_number) |
                            MVNETA_TX_INTR_MASK(txq_number) |
                            MVNETA_MISCINTR_INTR_MASK);
                mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                            MVNETA_CAUSE_PHY_STATUS_CHANGE |
                            MVNETA_CAUSE_LINK_CHANGE |
                            MVNETA_CAUSE_PSC_SYNC_CHANGE);
                netif_tx_start_all_queues(pp->dev);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                netif_tx_stop_all_queues(pp->dev);
                /* Mask all ethernet port interrupts */
                mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
                mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
                mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

                napi_synchronize(&port->napi);
                napi_disable(&port->napi);
                /* Disable per-CPU interrupts on the CPU that is
                 * brought down.
                 */
                smp_call_function_single(cpu, mvneta_percpu_disable,
                                         pp, true);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /* Check if a new CPU must be elected now this one is down */
                mvneta_percpu_elect(pp);
                /* Unmask all ethernet port interrupts */
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                            MVNETA_RX_INTR_MASK(rxq_number) |
                            MVNETA_TX_INTR_MASK(txq_number) |
                            MVNETA_MISCINTR_INTR_MASK);
                mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                            MVNETA_CAUSE_PHY_STATUS_CHANGE |
                            MVNETA_CAUSE_LINK_CHANGE |
                            MVNETA_CAUSE_PSC_SYNC_CHANGE);
                netif_tx_start_all_queues(pp->dev);
                break;
        }

        return NOTIFY_OK;
}
static int mvneta_open(struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int ret;

        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        ret = mvneta_setup_rxqs(pp);
        if (ret)
                return ret;

        ret = mvneta_setup_txqs(pp);
        if (ret)
                goto err_cleanup_rxqs;

        /* Connect to port interrupt line */
        ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
                                 MVNETA_DRIVER_NAME, pp->ports);
        if (ret) {
                netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
                goto err_cleanup_txqs;
        }

        /* Even though the documentation says that request_percpu_irq
         * doesn't enable the interrupts automatically, it actually
         * does so on the local CPU.
         *
         * Make sure it's disabled.
         */
        mvneta_percpu_disable(pp);

        /* Elect a CPU to handle our RX queue interrupt */
        mvneta_percpu_elect(pp);

        /* Register a CPU notifier to handle the case where our CPU
         * might be taken offline.
         */
        register_cpu_notifier(&pp->cpu_notifier);

        /* In default link is down */
        netif_carrier_off(pp->dev);

        ret = mvneta_mdio_probe(pp);
        if (ret < 0) {
                netdev_err(dev, "cannot probe MDIO bus\n");
                goto err_free_irq;
        }

        mvneta_start_dev(pp);

        return 0;

err_free_irq:
        free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
        mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
        mvneta_cleanup_rxqs(pp);
        return ret;
}
/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int cpu;

        mvneta_stop_dev(pp);
        mvneta_mdio_remove(pp);
        unregister_cpu_notifier(&pp->cpu_notifier);
        for_each_present_cpu(cpu)
                smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
        free_percpu_irq(dev->irq, pp->ports);
        mvneta_cleanup_rxqs(pp);
        mvneta_cleanup_txqs(pp);

        return 0;
}
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if (!pp->phy_dev)
                return -ENOTSUPP;

        return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
}
/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if (!pp->phy_dev)
                return -ENODEV;

        return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if (!pp->phy_dev)
                return -ENODEV;

        return phy_ethtool_sset(pp->phy_dev, cmd);
}
/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
                                       struct ethtool_coalesce *c)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int queue;

        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

                rxq->time_coal = c->rx_coalesce_usecs;
                rxq->pkts_coal = c->rx_max_coalesced_frames;
                mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
                mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
        }

        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];

                txq->done_pkts_coal = c->tx_max_coalesced_frames;
                mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
        }

        return 0;
}
/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
                                       struct ethtool_coalesce *c)
{
        struct mvneta_port *pp = netdev_priv(dev);

        c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
        c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

        c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
        return 0;
}
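/* Illustration only (not from the original source): these two
 * callbacks back the standard ethtool coalescing interface, e.g.:
 *
 *      ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16
 *      ethtool -c eth0
 *
 * set_coalesce applies one setting to every queue, and get_coalesce
 * reports queue 0 on the assumption that all queues share it.
 */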
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
                                       struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
                sizeof(drvinfo->bus_info));
}
static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
                                         struct ethtool_ringparam *ring)
{
        struct mvneta_port *pp = netdev_priv(netdev);

        ring->rx_max_pending = MVNETA_MAX_RXD;
        ring->tx_max_pending = MVNETA_MAX_TXD;
        ring->rx_pending = pp->rx_ring_size;
        ring->tx_pending = pp->tx_ring_size;
}
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
                                        struct ethtool_ringparam *ring)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
                return -EINVAL;
        pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
                ring->rx_pending : MVNETA_MAX_RXD;

        pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
                                   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
        if (pp->tx_ring_size != ring->tx_pending)
                netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
                            pp->tx_ring_size, ring->tx_pending);

        if (netif_running(dev)) {
                mvneta_stop(dev);
                if (mvneta_open(dev)) {
                        netdev_err(dev,
                                   "error on opening device after ring param change\n");
                        return -ENOMEM;
                }
        }

        return 0;
}
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
                                       u8 *data)
{
        if (sset == ETH_SS_STATS) {
                int i;

                for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               mvneta_statistics[i].name, ETH_GSTRING_LEN);
        }
}
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
        const struct mvneta_statistic *s;
        void __iomem *base = pp->base;
        u32 high, low;
        u64 val;
        int i;

        for (i = 0, s = mvneta_statistics;
             s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
             s++, i++) {
                val = 0;

                switch (s->type) {
                case T_REG_32:
                        val = readl_relaxed(base + s->offset);
                        break;
                case T_REG_64:
                        /* Docs say to read low 32-bit then high */
                        low = readl_relaxed(base + s->offset);
                        high = readl_relaxed(base + s->offset + 4);
                        val = (u64)high << 32 | low;
                        break;
                }

                pp->ethtool_stats[i] += val;
        }
}
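/* Illustration only (not from the original source): the 64-bit MIB
 * counters must be read low word first. The driver comment only says
 * the docs require that order; a common reason in such hardware is
 * that the high word is latched when the low word is read, so both
 * halves belong to the same sample:
 *
 *      low  = readl_relaxed(base + s->offset);
 *      high = readl_relaxed(base + s->offset + 4);
 *      val  = (u64)high << 32 | low;
 *
 * Reading in the other order could pair a stale high word with a new
 * low word around a carry.
 */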
static void mvneta_ethtool_get_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int i;

        mvneta_ethtool_update_stats(pp);

        for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
                *data++ = pp->ethtool_stats[i];
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
        if (sset == ETH_SS_STATS)
                return ARRAY_SIZE(mvneta_statistics);
        return -EOPNOTSUPP;
}
static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_open            = mvneta_open,
        .ndo_stop            = mvneta_stop,
        .ndo_start_xmit      = mvneta_tx,
        .ndo_set_rx_mode     = mvneta_set_rx_mode,
        .ndo_set_mac_address = mvneta_set_mac_addr,
        .ndo_change_mtu      = mvneta_change_mtu,
        .ndo_fix_features    = mvneta_fix_features,
        .ndo_get_stats64     = mvneta_get_stats64,
        .ndo_do_ioctl        = mvneta_ioctl,
};
const struct ethtool_ops mvneta_eth_tool_ops = {
        .get_link          = ethtool_op_get_link,
        .get_settings      = mvneta_ethtool_get_settings,
        .set_settings      = mvneta_ethtool_set_settings,
        .set_coalesce      = mvneta_ethtool_set_coalesce,
        .get_coalesce      = mvneta_ethtool_get_coalesce,
        .get_drvinfo       = mvneta_ethtool_get_drvinfo,
        .get_ringparam     = mvneta_ethtool_get_ringparam,
        .set_ringparam     = mvneta_ethtool_set_ringparam,
        .get_strings       = mvneta_ethtool_get_strings,
        .get_ethtool_stats = mvneta_ethtool_get_stats,
        .get_sset_count    = mvneta_ethtool_get_sset_count,
};
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
        int queue;

        /* Disable port */
        mvneta_port_disable(pp);

        /* Set port default values */
        mvneta_defaults_set(pp);

        pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
                                GFP_KERNEL);
        if (!pp->txqs)
                return -ENOMEM;

        /* Initialize TX descriptor rings */
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];

                txq->id = queue;
                txq->size = pp->tx_ring_size;
                txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
        }

        pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
                                GFP_KERNEL);
        if (!pp->rxqs)
                return -ENOMEM;

        /* Create Rx descriptor rings */
        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

                rxq->id = queue;
                rxq->size = pp->rx_ring_size;
                rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
                rxq->time_coal = MVNETA_RX_COAL_USEC;
        }

        return 0;
}
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
                                     const struct mbus_dram_target_info *dram)
{
        u32 win_enable;
        u32 win_protect;
        int i;

        for (i = 0; i < 6; i++) {
                mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
                mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

                if (i < 4)
                        mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
        }

        win_enable = 0x3f;
        win_protect = 0;

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
                            (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

                mvreg_write(pp, MVNETA_WIN_SIZE(i),
                            (cs->size - 1) & 0xffff0000);

                win_enable &= ~(1 << i);
                win_protect |= 3 << (2 * i);
        }

        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
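/* Illustration only (not from the original source): the window size
 * registers hold (size - 1) with only the upper 16 bits significant,
 * so windows are 64 KiB-granular. For a 256 MiB chip-select:
 *
 *      (0x10000000 - 1) & 0xffff0000 == 0x0fff0000
 */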
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
        u32 ctrl;

        /* MAC Cause register should be cleared */
        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

        ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

        /* Even though it might look weird, when we're configured in
         * SGMII or QSGMII mode, the RGMII bit needs to be set.
         */
        switch (phy_mode) {
        case PHY_INTERFACE_MODE_QSGMII:
                mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
                ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
                break;
        case PHY_INTERFACE_MODE_SGMII:
                mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
                ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
                break;
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
                ctrl |= MVNETA_GMAC2_PORT_RGMII;
                break;
        default:
                return -EINVAL;
        }

        if (pp->use_inband_status)
                ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;

        /* Cancel Port Reset */
        ctrl &= ~MVNETA_GMAC2_PORT_RESET;
        mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

        while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
                MVNETA_GMAC2_PORT_RESET) != 0)
                continue;

        return 0;
}
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
        const struct mbus_dram_target_info *dram_target_info;
        struct resource *res;
        struct device_node *dn = pdev->dev.of_node;
        struct device_node *phy_node;
        struct mvneta_port *pp;
        struct net_device *dev;
        const char *dt_mac_addr;
        char hw_mac_addr[ETH_ALEN];
        const char *mac_from;
        const char *managed;
        int phy_mode;
        int err;
        int cpu;

        dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
        if (!dev)
                return -ENOMEM;

        dev->irq = irq_of_parse_and_map(dn, 0);
        if (dev->irq == 0) {
                err = -EINVAL;
                goto err_free_netdev;
        }

        phy_node = of_parse_phandle(dn, "phy", 0);
        if (!phy_node) {
                if (!of_phy_is_fixed_link(dn)) {
                        dev_err(&pdev->dev, "no PHY specified\n");
                        err = -ENODEV;
                        goto err_free_irq;
                }

                err = of_phy_register_fixed_link(dn);
                if (err < 0) {
                        dev_err(&pdev->dev, "cannot register fixed PHY\n");
                        goto err_free_irq;
                }

                /* In the case of a fixed PHY, the DT node associated
                 * to the PHY is the Ethernet MAC DT node.
                 */
                phy_node = of_node_get(dn);
        }

        phy_mode = of_get_phy_mode(dn);
        if (phy_mode < 0) {
                dev_err(&pdev->dev, "incorrect phy-mode\n");
                err = -EINVAL;
                goto err_put_phy_node;
        }

        dev->tx_queue_len = MVNETA_MAX_TXD;
        dev->watchdog_timeo = 5 * HZ;
        dev->netdev_ops = &mvneta_netdev_ops;

        dev->ethtool_ops = &mvneta_eth_tool_ops;

        pp = netdev_priv(dev);
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;

        err = of_property_read_string(dn, "managed", &managed);
        pp->use_inband_status = (err == 0 &&
                                 strcmp(managed, "in-band-status") == 0);
        pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;

        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
                goto err_put_phy_node;
        }

        clk_prepare_enable(pp->clk);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pp->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pp->base)) {
                err = PTR_ERR(pp->base);
                goto err_clk;
        }

        /* Alloc per-cpu port structure */
        pp->ports = alloc_percpu(struct mvneta_pcpu_port);
        if (!pp->ports) {
                err = -ENOMEM;
                goto err_clk;
        }

        /* Alloc per-cpu stats */
        pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
        if (!pp->stats) {
                err = -ENOMEM;
                goto err_free_ports;
        }

        dt_mac_addr = of_get_mac_address(dn);
        if (dt_mac_addr) {
                mac_from = "device tree";
                memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
        } else {
                mvneta_get_mac_addr(pp, hw_mac_addr);
                if (is_valid_ether_addr(hw_mac_addr)) {
                        mac_from = "hardware";
                        memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
                } else {
                        mac_from = "random";
                        eth_hw_addr_random(dev);
                }
        }

        if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
                pp->tx_csum_limit = 1600;

        pp->tx_ring_size = MVNETA_MAX_TXD;
        pp->rx_ring_size = MVNETA_MAX_RXD;

        pp->dev = dev;
        SET_NETDEV_DEV(dev, &pdev->dev);

        err = mvneta_init(&pdev->dev, pp);
        if (err < 0)
                goto err_free_stats;

        err = mvneta_port_power_up(pp, phy_mode);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
                goto err_free_stats;
        }

        dram_target_info = mv_mbus_dram_info();
        if (dram_target_info)
                mvneta_conf_mbus_windows(pp, dram_target_info);

        for_each_present_cpu(cpu) {
                struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

                netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
                port->pp = pp;
        }

        dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
        dev->hw_features |= dev->features;
        dev->vlan_features |= dev->features;
        dev->priv_flags |= IFF_UNICAST_FLT;
        dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register\n");
                goto err_free_stats;
        }

        netdev_info(dev, "Using %s mac address %pM\n", mac_from,
                    dev->dev_addr);

        platform_set_drvdata(pdev, pp->dev);

        if (pp->use_inband_status) {
                struct phy_device *phy = of_phy_find_device(dn);

                mvneta_fixed_link_update(pp, phy);

                put_device(&phy->dev);
        }

        return 0;

err_free_stats:
        free_percpu(pp->stats);
err_free_ports:
        free_percpu(pp->ports);
err_clk:
        clk_disable_unprepare(pp->clk);
err_put_phy_node:
        of_node_put(phy_node);
err_free_irq:
        irq_dispose_mapping(dev->irq);
err_free_netdev:
        free_netdev(dev);
        return err;
}
/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct mvneta_port *pp = netdev_priv(dev);

        unregister_netdev(dev);
        clk_disable_unprepare(pp->clk);
        free_percpu(pp->ports);
        free_percpu(pp->stats);
        irq_dispose_mapping(dev->irq);
        of_node_put(pp->phy_node);
        free_netdev(dev);

        return 0;
}
static const struct of_device_id mvneta_match[] = {
        { .compatible = "marvell,armada-370-neta" },
        { .compatible = "marvell,armada-xp-neta" },
        { }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
        .probe = mvneta_probe,
        .remove = mvneta_remove,
        .driver = {
                .name = MVNETA_DRIVER_NAME,
                .of_match_table = mvneta_match,
        },
};

module_platform_driver(mvneta_driver);
MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");
module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);