/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf    << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define      MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define      MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define      MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_PORT_CONFIG			0x2400
#define      MVNETA_UNI_PROMISC_MODE		BIT(0)
#define      MVNETA_DEF_RXQ(q)			((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define      MVNETA_SDMA_BRST_SIZE_16		4
#define      MVNETA_NO_DESC_SWAP		0x0
#define      MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define      MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define      MVNETA_TX_IN_PRGRS			BIT(1)
#define      MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_TYPE_PRIO			0x24bc
#define      MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT		8
#define      MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))
#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define      MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_INTR_NEW_MASK			0x25a4
#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4
#define MVNETA_INTR_ENABLE			0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0xff000000
#define MVNETA_RXQ_CMD				0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT		8
#define      MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define      MVNETA_GMAC2_PSC_ENABLE		BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define      MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define      MVNETA_GMAC_LINK_UP		BIT(0)
#define      MVNETA_GMAC_SPEED_1000		BIT(1)
#define      MVNETA_GMAC_SPEED_100		BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define      MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define      MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT		16
#define      MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
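/* A worked example of the wraparound above: with the default ring of
 * MVNETA_MAX_RXD = 128 descriptors, last_desc is 127, so index 126
 * advances to 127 and index 127 wraps back to slot 0. Note the ring
 * size is therefore not required to be a power of two.
 */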
/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* Timer */
#define MVNETA_TX_DONE_TIMER_PERIOD	10

/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT		64

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

#define MVNETA_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
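/* Worked example of the sizing math above, for the standard Ethernet
 * MTU of 1500: 1500 + 2 (Marvell header) + 4 (VLAN tag) + 14 (ETH_HLEN)
 * + 4 (ETH_FCS_LEN) = 1524 bytes, which ALIGN() rounds up to 1536, a
 * multiple of the 32-byte cache line. The DMA buffer then adds
 * NET_SKB_PAD headroom on top of that.
 */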
struct mvneta_stats {
	struct	u64_stats_sync syncp;
	u64	packets;
	u64	bytes;
};

struct mvneta_port {
	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct timer_list tx_done_timer;
	struct net_device *dev;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Flags */
	unsigned long flags;
#define MVNETA_F_TX_DONE_TIMER_BIT  0

	/* Napi weight */
	int weight;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvneta_stats tx_stats;
	struct mvneta_stats rx_stats;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

	u16  reserved1;		/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};
struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
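/* Note on buf_cookie: the RX path below stores the sk_buff pointer of
 * the mapped buffer in this 32-bit field (see mvneta_rx_desc_fill()
 * casting the skb to u32), so the descriptor alone is enough to
 * recover the skb when the packet completes or is dropped.
 */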
struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;
static int txq_def;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}
/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}
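/* The two indices above implement a classic producer/consumer ring:
 * the xmit path advances txq_put_index as it fills descriptors, while
 * the tx-done path advances txq_get_index as it unmaps and frees the
 * corresponding skbs. Both wrap at txq->size.
 */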
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}
/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));

	do {
		start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
		stats->rx_packets = pp->rx_stats.packets;
		stats->rx_bytes	= pp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
		stats->tx_packets = pp->tx_stats.packets;
		stats->tx_bytes	= pp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;

	return stats;
}
/* Rx descriptors helper methods */

/* Checks whether the given RX descriptor is both the first and the
 * last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
{
	return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}
/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once.
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
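/* The division by two above suggests the register field counts 16-bit
 * words rather than bytes. For example, a 1536-byte packet size less
 * the 2-byte Marvell header gives 1534 bytes, programmed as 767
 * half-words.
 */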
/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
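/* Since the hardware expresses the packet offset in 8-byte units
 * (hence offset >> 3 above), passing NET_SKB_PAD (32 bytes on most
 * configurations) programs a field value of 4, reserving the usual
 * skb headroom in front of each received frame.
 */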
/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta less than 256.
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
static void __devinit mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	if (enable)
		val |= MVNETA_GMAC2_PORT_RGMII;
	else
		val &= ~MVNETA_GMAC2_PORT_RGMII;

	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}
/* Config SGMII port */
static void __devinit mvneta_port_sgmii_config(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val |= MVNETA_GMAC2_PSC_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}

	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only.
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}
/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}
/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}
/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues.
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);

	val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
		MVNETA_NO_DESC_SWAP);

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}
/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}
/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg  */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
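/* Example of the indexing above: a MAC address whose last nibble is
 * 0x6 lands at byte offset (6 / 4) * 4 = 4 from the table base, entry
 * 6 % 4 = 2 within that register, i.e. bits 23:16 of the second 32-bit
 * filter register.
 */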
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}
/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
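/* The conversion above turns microseconds into core-clock ticks:
 * clk_rate / 1000000 is the number of ticks per usec. Assuming, for
 * example, a 250 MHz core clock, the default MVNETA_RX_COAL_USEC (100)
 * would be programmed as 250 * 100 = 25000 ticks.
 */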
/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}
/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
{
	if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
		pp->tx_done_timer.expires = jiffies +
			msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
		add_timer(&pp->tx_done_timer);
	}
}
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}
/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation.
	 */
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == swab16(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
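/* A worked example of the command word above, for a TCP packet inside
 * an untagged IPv4 frame: l3_offs is 14 (the Ethernet header) and
 * ip_hdr_len is 5 (IHL, in 32-bit words), so
 * command = 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */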
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   rx_desc->status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}
/* Handle RX checksum offload */
static void mvneta_rx_csum(struct mvneta_port *pp,
			   struct mvneta_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
	    (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
}
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!skb)
			continue;

		dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
				 tx_desc->data_size, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
/* Handle end of transmission */
static int mvneta_txq_done(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (tx_done == 0)
		return tx_done;
	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
	}

	return tx_done;
}
/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
	if (!skb)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);

	return 0;
}
/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
				skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}
/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
						u32 cause)
{
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
}
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;

		dev_kfree_skb_any(skb);
		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 rx_desc->data_size, DMA_FROM_DEVICE);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}
/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done, rx_filled;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		u32 rx_status;
		int rx_bytes, err;

		prefetch(rx_desc);
		rx_done++;
		rx_filled++;
		rx_status = rx_desc->status;
		skb = (struct sk_buff *)rx_desc->buf_cookie;

		if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
					    (u32)skb);
			continue;
		}

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 rx_desc->data_size, DMA_FROM_DEVICE);

		rx_bytes = rx_desc->data_size -
			(ETH_FCS_LEN + MVNETA_MH_SIZE);
		u64_stats_update_begin(&pp->rx_stats.syncp);
		pp->rx_stats.packets++;
		pp->rx_stats.bytes += rx_bytes;
		u64_stats_update_end(&pp->rx_stats.syncp);

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_desc, skb);

		napi_gro_receive(&pp->napi, skb);

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(pp->dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			rx_filled--;
		}
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

	return rx_done;
}
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;

			txq->tx_skb[txq->txq_put_index] = skb;

			mvneta_txq_inc_put(txq);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;

			txq->tx_skb[txq->txq_put_index] = NULL;
			mvneta_txq_inc_put(txq);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings.
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}
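/* Descriptor marking scheme used by the tx path below: a linear skb
 * uses a single descriptor carrying MVNETA_TXD_FLZ_DESC (First + Last
 * + Zero-pad). A fragmented skb marks the head descriptor First only;
 * mvneta_tx_frag_process() above then leaves middle fragments unmarked
 * and marks the final fragment Last + Zero-pad. Only the descriptor
 * flagged Last has the skb pointer recorded in tx_skb[], so the
 * tx-done path frees each skb exactly once.
 */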
/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
	struct mvneta_tx_desc *tx_desc;
	struct netdev_queue *nq;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	frags = skb_shinfo(skb)->nr_frags + 1;
	nq = netdev_get_tx_queue(dev, txq_def);

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

	txq->count += frags;
	mvneta_txq_pend_desc_add(pp, txq, frags);

	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

out:
	if (frags > 0) {
		u64_stats_update_begin(&pp->tx_stats.syncp);
		pp->tx_stats.packets++;
		pp->tx_stats.bytes += skb->len;
		u64_stats_update_end(&pp->tx_stats.syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
		mvneta_txq_done(pp, txq);

	/* If after calling mvneta_txq_done, count equals
	 * frags, we need to set the timer.
	 */
	if (txq->count == frags && frags > 0)
		mvneta_add_tx_done_timer(pp);

	return NETDEV_TX_OK;
}
/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}
/* handle tx done - called from tx done timer callback */
static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
			      int *tx_todo)
{
	struct mvneta_tx_queue *txq;
	u32 tx_done = 0;
	struct netdev_queue *nq;

	*tx_todo = 0;
	while (cause_tx_done != 0) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);
		if (!txq)
			break;

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			tx_done += mvneta_txq_done(pp, txq);

		*tx_todo += txq->count;

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}

	return tx_done;
}
/* Compute crc8 of the specified address, using a unique algorithm,
 * according to hw spec, different than generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
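/* The reduction constant 0x107 corresponds to the polynomial
 * x^8 + x^2 + x + 1: each iteration of the inner loop conditionally
 * XORs out a shifted copy of the polynomial, so the final value fits
 * in 8 bits and indexes the 256-entry mcast_count[] table.
 */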
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method set the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1) {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}
/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}
/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}
/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, rxq_def);
		mvneta_set_special_mcast_table(pp, rxq_def);
		mvneta_set_other_mcast_table(pp, rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, rxq_def);
			mvneta_set_other_mcast_table(pp, rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      rxq_def);
				}
			}
		}
	}
}
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);

	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate transmitted packets on
 * the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the cause Rx Tx register indicate received packets on
 * the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	unsigned long flags;
	struct mvneta_port *pp = netdev_priv(napi->dev);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
		MVNETA_RX_INTR_MASK(rxq_number);

	/* For the case where the last mvneta_poll did not process all
	 * RX packets.
	 */
	cause_rx_tx |= pp->cause_rx_tx;
	if (rxq_number > 1) {
		while ((cause_rx_tx != 0) && (budget > 0)) {
			int count;
			struct mvneta_rx_queue *rxq;
			/* get rx queue number from cause_rx_tx */
			rxq = mvneta_rx_policy(pp, cause_rx_tx);
			if (!rxq)
				break;

			/* process the packet in that rx queue */
			count = mvneta_rx(pp, budget, rxq);
			rx_done += count;
			budget -= count;
			if (budget > 0) {
				/* set off the rx bit of the
				 * corresponding bit in the cause rx
				 * tx register, so that next iteration
				 * will find the next rx queue where
				 * packets are received on.
				 */
				cause_rx_tx &= ~((1 << rxq->id) << 8);
			}
		}
	} else {
		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
		budget -= rx_done;
	}

	if (budget > 0) {
		cause_rx_tx = 0;
		napi_complete(napi);
		local_irq_save(flags);
		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
			    MVNETA_RX_INTR_MASK(rxq_number));
		local_irq_restore(flags);
	}

	pp->cause_rx_tx = cause_rx_tx;
	return rx_done;
}
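/* Note the bookkeeping above: when the budget runs out before every
 * signalled RX queue is drained, the still-set cause bits are parked
 * in pp->cause_rx_tx and OR-ed back in on the next poll, so no queue
 * is starved across NAPI iterations.
 */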
/* tx done timer callback */
static void mvneta_tx_done_timer_callback(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvneta_port *pp = netdev_priv(dev);
	int tx_done = 0, tx_todo = 0;

	if (!netif_running(dev))
		return;

	clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

	tx_done = mvneta_tx_done_gbe(pp,
				     (((1 << txq_number) - 1) &
				      MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
				     &tx_todo);
	if (tx_done < tx_todo)
		mvneta_add_tx_done_timer(pp);
}
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	struct net_device *dev = pp->dev;
	int i;

	for (i = 0; i < num; i++) {
		struct sk_buff *skb;
		struct mvneta_rx_desc *rx_desc;
		unsigned long phys_addr;

		skb = dev_alloc_skb(pp->pkt_size);
		if (!skb) {
			netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}

		rx_desc = rxq->descs + i;
		memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
		phys_addr = dma_map_single(dev->dev.parent, skb->head,
					   MVNETA_RX_BUF_SIZE(pp->pkt_size),
					   DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
			dev_kfree_skb(skb);
			break;
		}

		mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}
/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the hal tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL) {
		netdev_err(pp->dev,
			   "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
			   rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
			   rxq->size);
		return -ENOMEM;
	}

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
	mvneta_rxq_bm_disable(pp, rxq);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}
2006 static void mvneta_rxq_deinit(struct mvneta_port
*pp
,
2007 struct mvneta_rx_queue
*rxq
)
2009 mvneta_rxq_drop_pkts(pp
, rxq
);
2012 dma_free_coherent(pp
->dev
->dev
.parent
,
2013 rxq
->size
* MVNETA_DESC_ALIGNED_SIZE
,
2019 rxq
->next_desc_to_proc
= 0;
2020 rxq
->descs_phys
= 0;
/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (txq->descs == NULL) {
		netdev_err(pp->dev,
			   "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
			   txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
			   txq->size);
		return -ENOMEM;
	}

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	return 0;
}
/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	kfree(txq->tx_skb);

	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}
/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
        mvneta_max_rx_size_set(pp, pp->pkt_size);
        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

        /* start the Rx/Tx activity */
        mvneta_port_enable(pp);

        /* Enable polling on the port */
        napi_enable(&pp->napi);

        /* Unmask interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                    MVNETA_RX_INTR_MASK(rxq_number));

        phy_start(pp->phy_dev);
        netif_tx_start_all_queues(pp->dev);
}
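/* The bring-up order in mvneta_start_dev() is deliberate: the port
 * and NAPI are enabled before the Rx interrupts are unmasked, so a
 * poller exists by the time the first interrupt can fire; the PHY
 * and the Tx queues are started last. mvneta_stop_dev() below tears
 * things down broadly in the reverse order. (This rationale is the
 * usual NAPI-driver convention, inferred from the code, not from
 * driver documentation.)
 */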
static void mvneta_stop_dev(struct mvneta_port *pp)
{
        phy_stop(pp->phy_dev);

        napi_disable(&pp->napi);

        netif_carrier_off(pp->dev);

        mvneta_port_down(pp);
        netif_tx_stop_all_queues(pp->dev);

        /* Stop the port activity */
        mvneta_port_disable(pp);

        /* Clear all ethernet port interrupts */
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

        /* Mask all ethernet port interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

        mvneta_tx_reset(pp);
        mvneta_rx_reset(pp);
}
/* Tx timeout callback - display a message and stop/start the network device */
static void mvneta_tx_timeout(struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);

        netdev_info(dev, "tx timeout\n");
        mvneta_stop_dev(pp);
        mvneta_start_dev(pp);
}
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
        if (mtu < 68) {
                netdev_err(dev, "cannot change mtu to less than 68\n");
                return -EINVAL;
        }

        /* 9676 == 9700 - 20 and rounding to 8 */
        if (mtu > 9676) {
                netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
                mtu = 9676;
        }

        if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
                netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
                            mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
                mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
        }

        return mtu;
}
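/* Resulting behaviour, in summary (the exact rounding depends on
 * MVNETA_RX_PKT_SIZE(), defined earlier in this file):
 *   mtu < 68   -> -EINVAL, the change is rejected
 *   mtu > 9676 -> clamped to 9676
 *   otherwise  -> possibly adjusted so that the derived Rx packet
 *                 size is a multiple of 8, then returned
 */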
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int ret;

        mtu = mvneta_check_mtu_valid(dev, mtu);
        if (mtu < 0)
                return -EINVAL;

        dev->mtu = mtu;

        if (!netif_running(dev))
                return 0;

        /* The interface is running, so we have to force a
         * reallocation of the RXQs
         */
        mvneta_stop_dev(pp);

        mvneta_cleanup_txqs(pp);
        mvneta_cleanup_rxqs(pp);

        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

        ret = mvneta_setup_rxqs(pp);
        if (ret) {
                netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
                return ret;
        }

        ret = mvneta_setup_txqs(pp);
        if (ret) {
                netdev_err(pp->dev, "unable to setup txqs after MTU change\n");
                return ret;
        }

        mvneta_start_dev(pp);

        return 0;
}
/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
        struct mvneta_port *pp = netdev_priv(dev);
        u8 *mac = addr + 2;     /* skip the sa_family field of struct sockaddr */
        int i;

        if (netif_running(dev))
                return -EBUSY;

        /* Remove previous address table entry */
        mvneta_mac_addr_set(pp, dev->dev_addr, -1);

        /* Set new addr in hw */
        mvneta_mac_addr_set(pp, mac, rxq_def);

        /* Set addr in the device */
        for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = mac[i];

        return 0;
}
static void mvneta_adjust_link(struct net_device *ndev)
{
        struct mvneta_port *pp = netdev_priv(ndev);
        struct phy_device *phydev = pp->phy_dev;
        int status_change = 0;

        if (phydev->link) {
                if ((pp->speed != phydev->speed) ||
                    (pp->duplex != phydev->duplex)) {
                        u32 val;

                        val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                        val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
                                 MVNETA_GMAC_CONFIG_GMII_SPEED |
                                 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

                        if (phydev->duplex)
                                val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

                        if (phydev->speed == SPEED_1000)
                                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
                        else
                                val |= MVNETA_GMAC_CONFIG_MII_SPEED;

                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

                        pp->duplex = phydev->duplex;
                        pp->speed  = phydev->speed;
                }
        }

        if (phydev->link != pp->link) {
                if (!phydev->link) {
                        pp->duplex = -1;
                        pp->speed = 0;
                }

                pp->link = phydev->link;
                status_change = 1;
        }

        if (status_change) {
                if (phydev->link) {
                        u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                        val |= (MVNETA_GMAC_FORCE_LINK_PASS |
                                MVNETA_GMAC_FORCE_LINK_DOWN);
                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
                        mvneta_port_up(pp);
                        netdev_info(pp->dev, "link up\n");
                } else {
                        mvneta_port_down(pp);
                        netdev_info(pp->dev, "link down\n");
                }
        }
}
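/* mvneta_adjust_link() is the callback handed to of_phy_connect()
 * in mvneta_mdio_probe() below: the PHY layer invokes it whenever
 * the negotiated link state changes, and the function propagates
 * the new speed/duplex into MVNETA_GMAC_AUTONEG_CONFIG and brings
 * the port up or down accordingly.
 */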
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
        struct phy_device *phy_dev;

        phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
                                 pp->phy_interface);
        if (!phy_dev) {
                netdev_err(pp->dev, "could not find the PHY\n");
                return -ENODEV;
        }

        phy_dev->supported &= PHY_GBIT_FEATURES;
        phy_dev->advertising = phy_dev->supported;

        pp->phy_dev = phy_dev;
        pp->link    = 0;
        pp->duplex  = 0;
        pp->speed   = 0;

        return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
        phy_disconnect(pp->phy_dev);
        pp->phy_dev = NULL;
}
static int mvneta_open(struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int ret;

        mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

        ret = mvneta_setup_rxqs(pp);
        if (ret)
                return ret;

        ret = mvneta_setup_txqs(pp);
        if (ret)
                goto err_cleanup_rxqs;

        /* Connect to port interrupt line */
        ret = request_irq(pp->dev->irq, mvneta_isr, 0,
                          MVNETA_DRIVER_NAME, pp);
        if (ret) {
                netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
                goto err_cleanup_txqs;
        }

        /* By default, the link is down */
        netif_carrier_off(pp->dev);

        ret = mvneta_mdio_probe(pp);
        if (ret < 0) {
                netdev_err(dev, "cannot probe MDIO bus\n");
                goto err_free_irq;
        }

        mvneta_start_dev(pp);

        return 0;

err_free_irq:
        free_irq(pp->dev->irq, pp);
err_cleanup_txqs:
        mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
        mvneta_cleanup_rxqs(pp);
        return ret;
}
/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);

        mvneta_stop_dev(pp);
        mvneta_mdio_remove(pp);
        free_irq(dev->irq, pp);
        mvneta_cleanup_rxqs(pp);
        mvneta_cleanup_txqs(pp);
        del_timer(&pp->tx_done_timer);
        clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

        return 0;
}
/* Ethtool methods */

/* Get settings (phy address, speed) for ethtool */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if (!pp->phy_dev)
                return -ENODEV;

        return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtool */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if (!pp->phy_dev)
                return -ENODEV;

        return phy_ethtool_sset(pp->phy_dev, cmd);
}
/* Set interrupt coalescing for ethtool */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
                                       struct ethtool_coalesce *c)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int queue;

        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
                rxq->time_coal = c->rx_coalesce_usecs;
                rxq->pkts_coal = c->rx_max_coalesced_frames;
                mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
                mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
        }

        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
                txq->done_pkts_coal = c->tx_max_coalesced_frames;
                mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
        }

        return 0;
}
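/* These handlers back the generic ethtool coalescing interface, so
 * from userspace (assuming the port is named eth0) something like:
 *
 *   ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16
 *
 * updates time_coal/pkts_coal on every Rx queue and done_pkts_coal
 * on every Tx queue through the loops above.
 */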
/* Get interrupt coalescing for ethtool */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
                                       struct ethtool_coalesce *c)
{
        struct mvneta_port *pp = netdev_priv(dev);

        c->rx_coalesce_usecs       = pp->rxqs[0].time_coal;
        c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

        c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;

        return 0;
}
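/* Only queue 0 is consulted when reporting the current settings;
 * that is sufficient because mvneta_ethtool_set_coalesce() above
 * applies the same values to every queue.
 */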
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
                                       struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
                sizeof(drvinfo->bus_info));
}
static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
                                         struct ethtool_ringparam *ring)
{
        struct mvneta_port *pp = netdev_priv(netdev);

        ring->rx_max_pending = MVNETA_MAX_RXD;
        ring->tx_max_pending = MVNETA_MAX_TXD;
        ring->rx_pending = pp->rx_ring_size;
        ring->tx_pending = pp->tx_ring_size;
}

static int mvneta_ethtool_set_ringparam(struct net_device *dev,
                                        struct ethtool_ringparam *ring)
{
        struct mvneta_port *pp = netdev_priv(dev);

        if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
                return -EINVAL;
        pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
                ring->rx_pending : MVNETA_MAX_RXD;
        pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
                ring->tx_pending : MVNETA_MAX_TXD;

        if (netif_running(dev)) {
                mvneta_stop(dev);
                if (mvneta_open(dev)) {
                        netdev_err(dev,
                                   "error on opening device after ring param change\n");
                        return -ENOMEM;
                }
        }

        return 0;
}
static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_open            = mvneta_open,
        .ndo_stop            = mvneta_stop,
        .ndo_start_xmit      = mvneta_tx,
        .ndo_set_rx_mode     = mvneta_set_rx_mode,
        .ndo_set_mac_address = mvneta_set_mac_addr,
        .ndo_change_mtu      = mvneta_change_mtu,
        .ndo_tx_timeout      = mvneta_tx_timeout,
        .ndo_get_stats64     = mvneta_get_stats64,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
        .get_link       = ethtool_op_get_link,
        .get_settings   = mvneta_ethtool_get_settings,
        .set_settings   = mvneta_ethtool_set_settings,
        .set_coalesce   = mvneta_ethtool_set_coalesce,
        .get_coalesce   = mvneta_ethtool_get_coalesce,
        .get_drvinfo    = mvneta_ethtool_get_drvinfo,
        .get_ringparam  = mvneta_ethtool_get_ringparam,
        .set_ringparam  = mvneta_ethtool_set_ringparam,
};
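/* A note on .set_ringparam: changing the ring sizes on a running
 * interface restarts it (mvneta_stop()/mvneta_open() above), so the
 * descriptor rings are reallocated with the new sizes, e.g.
 * (assuming the port is named eth0):
 *
 *   ethtool -G eth0 rx 128 tx 256
 *
 * Requests above MVNETA_MAX_RXD/MVNETA_MAX_TXD are silently capped
 * rather than rejected.
 */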
/* Initialize hw */
static int __devinit
mvneta_init(struct mvneta_port *pp, int phy_addr)
{
        int queue;

        /* Disable port */
        mvneta_port_disable(pp);

        /* Set port default values */
        mvneta_defaults_set(pp);

        pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
                           GFP_KERNEL);
        if (!pp->txqs)
                return -ENOMEM;

        /* Initialize TX descriptor rings */
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
                txq->id = queue;
                txq->size = pp->tx_ring_size;
                txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
        }

        pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
                           GFP_KERNEL);
        if (!pp->rxqs) {
                kfree(pp->txqs);
                return -ENOMEM;
        }

        /* Create Rx descriptor rings */
        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
                rxq->id = queue;
                rxq->size = pp->rx_ring_size;
                rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
                rxq->time_coal = MVNETA_RX_COAL_USEC;
        }

        return 0;
}
static void __devexit
mvneta_deinit(struct mvneta_port *pp)
{
        kfree(pp->txqs);
        kfree(pp->rxqs);
}
/* platform glue : initialize decoding windows */
static void __devinit
mvneta_conf_mbus_windows(struct mvneta_port *pp,
                         const struct mbus_dram_target_info *dram)
{
        u32 win_enable;
        u32 win_protect;
        int i;

        for (i = 0; i < 6; i++) {
                mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
                mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

                if (i < 4)
                        mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
        }

        win_enable = 0x3f;
        win_protect = 0;

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;
                mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
                            (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

                mvreg_write(pp, MVNETA_WIN_SIZE(i),
                            (cs->size - 1) & 0xffff0000);

                win_enable &= ~(1 << i);
                win_protect |= 3 << (2 * i);
        }

        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
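/* Window encoding used above (the usual Marvell MBus layout): the
 * base register packs the 64 KiB-aligned window base in bits 31:16,
 * the DRAM attribute in bits 15:8 and the target ID in bits 7:0,
 * while the size register holds (size - 1) masked to 64 KiB
 * granularity. Clearing a window's bit in the value written to
 * MVNETA_BASE_ADDR_ENABLE activates it. Note that win_protect is
 * computed but never written to a register in this version of the
 * driver.
 */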
/* Power up the port */
static void __devinit
mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
        u32 val;

        /* MAC Cause register should be cleared */
        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

        if (phy_mode == PHY_INTERFACE_MODE_SGMII)
                mvneta_port_sgmii_config(pp);

        mvneta_gmac_rgmii_set(pp, 1);

        /* Cancel Port Reset */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
        val &= ~MVNETA_GMAC2_PORT_RESET;
        mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

        /* Busy-wait until the reset bit self-clears */
        while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
                MVNETA_GMAC2_PORT_RESET) != 0)
                continue;
}
/* Device initialization routine */
static int __devinit
mvneta_probe(struct platform_device *pdev)
{
        const struct mbus_dram_target_info *dram_target_info;
        struct device_node *dn = pdev->dev.of_node;
        struct device_node *phy_node;
        u32 phy_addr;
        struct mvneta_port *pp;
        struct net_device *dev;
        const char *mac_addr;
        int phy_mode;
        int err;

        /* Our multiqueue support is not complete, so for now, only
         * allow the usage of the first RX queue
         */
        if (rxq_def != 0) {
                dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
                return -EINVAL;
        }

        dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
        if (!dev)
                return -ENOMEM;

        dev->irq = irq_of_parse_and_map(dn, 0);
        if (dev->irq == 0) {
                err = -EINVAL;
                goto err_free_netdev;
        }

        phy_node = of_parse_phandle(dn, "phy", 0);
        if (!phy_node) {
                dev_err(&pdev->dev, "no associated PHY\n");
                err = -ENODEV;
                goto err_free_irq;
        }

        phy_mode = of_get_phy_mode(dn);
        if (phy_mode < 0) {
                dev_err(&pdev->dev, "incorrect phy-mode\n");
                err = -EINVAL;
                goto err_free_irq;
        }

        mac_addr = of_get_mac_address(dn);

        if (!mac_addr || !is_valid_ether_addr(mac_addr))
                eth_hw_addr_random(dev);
        else
                memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

        dev->tx_queue_len = MVNETA_MAX_TXD;
        dev->watchdog_timeo = 5 * HZ;
        dev->netdev_ops = &mvneta_netdev_ops;

        SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);

        pp = netdev_priv(dev);

        pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
        init_timer(&pp->tx_done_timer);
        clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);

        pp->weight = MVNETA_RX_POLL_WEIGHT;
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;

        pp->base = of_iomap(dn, 0);
        if (pp->base == NULL) {
                err = -ENOMEM;
                goto err_free_irq;
        }

        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
                goto err_unmap;
        }

        clk_prepare_enable(pp->clk);

        pp->tx_done_timer.data = (unsigned long)dev;

        pp->tx_ring_size = MVNETA_MAX_TXD;
        pp->rx_ring_size = MVNETA_MAX_RXD;

        pp->dev = dev;
        SET_NETDEV_DEV(dev, &pdev->dev);

        err = mvneta_init(pp, phy_addr);
        if (err < 0) {
                dev_err(&pdev->dev, "can't init eth hal\n");
                goto err_clk;
        }
        mvneta_port_power_up(pp, phy_mode);

        dram_target_info = mv_mbus_dram_info();
        if (dram_target_info)
                mvneta_conf_mbus_windows(pp, dram_target_info);

        netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);

        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register\n");
                goto err_deinit;
        }

        dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
        dev->priv_flags |= IFF_UNICAST_FLT;

        netdev_info(dev, "mac: %pM\n", dev->dev_addr);

        platform_set_drvdata(pdev, pp->dev);

        return 0;

err_deinit:
        mvneta_deinit(pp);
err_clk:
        clk_disable_unprepare(pp->clk);
err_unmap:
        iounmap(pp->base);
err_free_irq:
        irq_dispose_mapping(dev->irq);
err_free_netdev:
        free_netdev(dev);
        return err;
}
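/* The error labels in mvneta_probe() unwind strictly in reverse
 * order of acquisition (hal init, clock, I/O mapping, IRQ mapping,
 * netdev allocation), which is the conventional unwind pattern for
 * kernel probe routines; mvneta_remove() below releases the same
 * resources for a successfully probed device.
 */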
/* Device removal routine */
static int __devexit
mvneta_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct mvneta_port *pp = netdev_priv(dev);

        unregister_netdev(dev);
        mvneta_deinit(pp);
        clk_disable_unprepare(pp->clk);
        iounmap(pp->base);
        irq_dispose_mapping(dev->irq);
        free_netdev(dev);

        platform_set_drvdata(pdev, NULL);

        return 0;
}
static const struct of_device_id mvneta_match[] = {
        { .compatible = "marvell,armada-370-neta" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
        .probe = mvneta_probe,
        .remove = __devexit_p(mvneta_remove),
        .driver = {
                .name = MVNETA_DRIVER_NAME,
                .of_match_table = mvneta_match,
        },
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(txq_def, int, S_IRUGO);
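/* All four module parameters are declared with S_IRUGO (0444), so
 * they can be set only at load time, e.g. (hypothetical values,
 * assuming the module is built as mvneta.ko):
 *
 *   modprobe mvneta rxq_def=0
 *
 * and then inspected, read-only, under
 * /sys/module/mvneta/parameters/. Note that mvneta_probe() rejects
 * any rxq_def other than 0 while multiqueue support is incomplete.
 */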