/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define PORT_SERIAL_CONTROL1		0x004c
#define  CLK125_BYPASS_EN		0x00000010
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define RX_DISCARD_FRAME_CNT		0x0084
#define RX_OVERRUN_FRAME_CNT		0x0088
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))

/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
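
/*
 * Note (inferred from the bit names above): on little-endian hosts the
 * BLM_RX_NO_SWAP/BLM_TX_NO_SWAP bits keep the SDMA engine from swapping
 * byte lanes in packet buffers, so buffer data is accessed in host byte
 * order on either endianness.
 */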

/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	512
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

#define TSO_HEADER_SIZE		128

/* Max number of allowed TCP segments for software TSO */
#define MV643XX_MAX_TSO_SEGS	100
#define MV643XX_MAX_SKB_DESCS	(MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
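
/*
 * All TSO headers live in one coherent per-queue allocation (tso_hdrs /
 * tso_hdrs_dma, one TSO_HEADER_SIZE slice per descriptor slot), so
 * IS_TSO_HEADER() lets the reclaim path tell those buffers apart from
 * ordinary skb data, which must be DMA-unmapped individually.
 */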

#define DESC_DMA_MAP_SINGLE	0
#define DESC_DMA_MAP_PAGE	1

/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define GEN_TCP_UDP_CHK_FULL		0x00000400
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	int extended_rx_coal_limit;
	int tx_bw_control;
	int tx_csum_limit;
	struct clk *clk;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
	/* Non MIB hardware counters */
	u32 rx_discard;
	u32 rx_overrun;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	int tx_stop_threshold;
	int tx_wake_threshold;

	char *tso_hdrs;
	dma_addr_t tso_hdrs_dma;

	struct tx_desc *tx_desc_area;
	char *tx_desc_mapping; /* array to track the type of the dma mapping */
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];

	/*
	 * Hardware-specific parameters.
	 */
	struct clk *clk;
	unsigned int t_clk;
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}
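
/*
 * rdl()/wrl() take offsets relative to the shared controller base, while
 * rdlp()/wrlp() take offsets relative to mp->base, which points at this
 * port's own register block (the 0x0400/0x0800/0x0c00 layout described
 * in the per-port register comment above).
 */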

/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_desc_count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		napi_gro_receive(&mp->napi, skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		    (RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = netdev_alloc_skb(mp->dev, mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb_end_pointer(skb) - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}
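
/*
 * Fragments that are at most 8 bytes long and not 8-byte aligned trip the
 * check above; mv643xx_eth_xmit() linearizes such skbs before handing
 * them to the hardware.
 */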

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
		       u16 *l4i_chk, u32 *command, int length)
{
	int ret;
	u32 cmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;

		if (length - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			ret = skb_checksum_help(skb);
			if (!ret)
				goto no_csum;
			return ret;
		}

		if (tag_bytes & 4)
			cmd |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd |= MAC_HDR_EXTRA_8_BYTES;

		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
		       GEN_IP_V4_CHECKSUM |
		       ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
		 * it seems we don't need to pass the initial checksum. */
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd |= UDP_FRAME;
			*l4i_chk = 0;
			break;
		case IPPROTO_TCP:
			*l4i_chk = 0;
			break;
		default:
			WARN(1, "protocol not supported");
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd |= 5 << TX_IHL_SHIFT;
	}
	*command = cmd;
	return 0;
}
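
/*
 * Worked example for the tag_bytes logic above: hdr_len is the offset of
 * the IP header, so tag_bytes is 0 for a plain Ethernet frame, 4 for one
 * VLAN tag and 8 for two; any other value (tag_bytes & ~12) falls back to
 * software checksumming via skb_checksum_help().
 */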

static inline int
txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
		 struct sk_buff *skb, char *data, int length,
		 bool last_tcp, bool is_last)
{
	int tx_index;
	u32 cmd_sts;
	struct tx_desc *desc;

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];
	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

	desc->l4i_chk = 0;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(dev->dev.parent, data,
				       length, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
		WARN(1, "dma_map_single failed!\n");
		return -ENOMEM;
	}

	cmd_sts = BUFFER_OWNED_BY_DMA;
	if (last_tcp) {
		/* last descriptor in the TCP packet */
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
		/* last descriptor in SKB */
		if (is_last)
			cmd_sts |= TX_ENABLE_INTERRUPT;
	}
	desc->cmd_sts = cmd_sts;
	return 0;
}

static inline void
txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int tx_index;
	struct tx_desc *desc;
	int ret;
	u32 cmd_csum = 0;
	u16 l4i_chk = 0;

	tx_index = txq->tx_curr_desc;
	desc = &txq->tx_desc_area[tx_index];

	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
	if (ret)
		WARN(1, "failed to prepare checksum!");

	/* Should we set this? Can't use the value from skb_tx_csum()
	 * as it's not the correct initial L4 checksum to use. */
	desc->l4i_chk = 0;

	desc->byte_cnt = hdr_len;
	desc->buf_ptr = txq->tso_hdrs_dma +
			txq->tx_curr_desc * TSO_HEADER_SIZE;
	desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
				   GEN_CRC;

	txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
}

static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int total_len, data_left, ret;
	int desc_count = 0;
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* Count needed descriptors */
	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
		netdev_dbg(dev, "not enough descriptors for TSO!\n");
		return -EBUSY;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		txq_put_hdr_tso(skb, txq, data_left);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);
			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
					       size == data_left,
					       total_len == 0);
			if (ret)
				goto err_release;
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	__skb_queue_tail(&txq->tx_skb, skb);
	skb_tx_timestamp(skb);

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);
	txq->tx_desc_count += desc_count;
	return 0;
err_release:
	/* TODO: Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	return ret;
}
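
/*
 * Descriptor layout produced by txq_submit_tso(): each TCP segment gets a
 * header descriptor pointing into the per-queue tso_hdrs buffer, followed
 * by one or more data descriptors for its payload slice, which is why the
 * descriptor budget is checked against tso_count_descs() up front.
 */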

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];
		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = skb_frag_size(this_frag);
		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
						 this_frag, 0, desc->byte_cnt,
						 DMA_TO_DEVICE);
	}
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length, ret;

	cmd_sts = 0;
	l4i_chk = 0;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		return -EBUSY;
	}

	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
	if (ret)
		return ret;
	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];
	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int length, queue, ret;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	length = skb->len;

	if (skb_is_gso(skb))
		ret = txq_submit_tso(txq, skb, dev);
	else
		ret = txq_submit_skb(txq, skb, dev);
	if (!ret) {
		txq->tx_bytes += length;
		txq->tx_packets++;

		if (txq->tx_desc_count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);
	} else {
		txq->tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock_bh(nq);

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		char desc_dma_map;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		desc_dma_map = txq->tx_desc_mapping[tx_index];

		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
			if (desc_dma_map == DESC_DMA_MAP_PAGE)
				dma_unmap_page(mp->dev->dev.parent,
					       desc->buf_ptr,
					       desc->byte_cnt,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(mp->dev->dev.parent,
						 desc->buf_ptr,
						 desc->byte_cnt,
						 DMA_TO_DEVICE);
		}

		if (cmd_sts & TX_ENABLE_INTERRUPT) {
			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);

			if (!WARN_ON(!skb))
				dev_kfree_skb(skb);
		}

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}
	}

	__netif_tx_unlock_bh(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
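/*
 * Worked example (assuming a 200 MHz t_clk; the real rate comes from
 * mp->t_clk): rate = 100000000 (100 Mbit/s) gives
 * token_rate = ((100000000 / 1000) * 64) / (200000000 / 1000) = 32.
 */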
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}


/* mii management interface *************************************************/
static void mv643xx_eth_adjust_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	u32 autoneg_disable = FORCE_LINK_PASS |
			      DISABLE_AUTO_NEG_SPEED_GMII |
			      DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
			      DISABLE_AUTO_NEG_FOR_DUPLEX;

	if (mp->phy->autoneg == AUTONEG_ENABLE) {
		/* enable auto negotiation */
		pscr &= ~autoneg_disable;
		goto out_write;
	}

	pscr |= autoneg_disable;

	if (mp->phy->speed == SPEED_1000) {
		/* force gigabit, half duplex not supported */
		pscr |= SET_GMII_SPEED_TO_1000;
		pscr |= SET_FULL_DUPLEX_MODE;
		goto out_write;
	}

	pscr &= ~SET_GMII_SPEED_TO_1000;

	if (mp->phy->speed == SPEED_100)
		pscr |= SET_MII_SPEED_TO_100;
	else
		pscr &= ~SET_MII_SPEED_TO_100;

	if (mp->phy->duplex == DUPLEX_FULL)
		pscr |= SET_FULL_DUPLEX_MODE;
	else
		pscr &= ~SET_FULL_DUPLEX_MODE;

out_write:
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);

	/* Clear non MIB hw counters also */
	rdlp(mp, RX_DISCARD_FRAME_CNT);
	rdlp(mp, RX_OVERRUN_FRAME_CNT);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	/* Non MIB hardware counters */
	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
	spin_unlock_bh(&mp->mib_counters_lock);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;
	mib_counters_update(mp);
	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
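
/*
 * The hardware MIB counters are clear-on-read, so mib_read() accumulates
 * into the software copy and the timer above re-reads every counter every
 * 30 seconds to keep the 32-bit hardware registers from wrapping unnoticed.
 */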

/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
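/*
 * Worked example (assuming a 200 MHz t_clk; the real rate comes from
 * mp->t_clk): a requested delay of 64 usec gives
 * register_value = 64 * 200000000 / 64000000 = 200.
 */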
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	temp *= 64000000;
	do_div(temp, mp->t_clk);

	return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	temp = (u64)usec * mp->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	temp *= 64000000;
	do_div(temp, mp->t_clk);

	return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	temp = (u64)usec * mp->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}
c9df406f 1408/* ethtool ******************************************************************/
e5371493 1409struct mv643xx_eth_stats {
c9df406f
LB
1410 char stat_string[ETH_GSTRING_LEN];
1411 int sizeof_stat;
16820054
LB
1412 int netdev_off;
1413 int mp_off;
c9df406f
LB
1414};
1415
16820054
LB
1416#define SSTAT(m) \
1417 { #m, FIELD_SIZEOF(struct net_device_stats, m), \
1418 offsetof(struct net_device, stats.m), -1 }
1419
1420#define MIBSTAT(m) \
1421 { #m, FIELD_SIZEOF(struct mib_counters, m), \
1422 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
1423
1424static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1425 SSTAT(rx_packets),
1426 SSTAT(tx_packets),
1427 SSTAT(rx_bytes),
1428 SSTAT(tx_bytes),
1429 SSTAT(rx_errors),
1430 SSTAT(tx_errors),
1431 SSTAT(rx_dropped),
1432 SSTAT(tx_dropped),
1433 MIBSTAT(good_octets_received),
1434 MIBSTAT(bad_octets_received),
1435 MIBSTAT(internal_mac_transmit_err),
1436 MIBSTAT(good_frames_received),
1437 MIBSTAT(bad_frames_received),
1438 MIBSTAT(broadcast_frames_received),
1439 MIBSTAT(multicast_frames_received),
1440 MIBSTAT(frames_64_octets),
1441 MIBSTAT(frames_65_to_127_octets),
1442 MIBSTAT(frames_128_to_255_octets),
1443 MIBSTAT(frames_256_to_511_octets),
1444 MIBSTAT(frames_512_to_1023_octets),
1445 MIBSTAT(frames_1024_to_max_octets),
1446 MIBSTAT(good_octets_sent),
1447 MIBSTAT(good_frames_sent),
1448 MIBSTAT(excessive_collision),
1449 MIBSTAT(multicast_frames_sent),
1450 MIBSTAT(broadcast_frames_sent),
1451 MIBSTAT(unrec_mac_control_received),
1452 MIBSTAT(fc_sent),
1453 MIBSTAT(good_fc_received),
1454 MIBSTAT(bad_fc_received),
1455 MIBSTAT(undersize_received),
1456 MIBSTAT(fragments_received),
1457 MIBSTAT(oversize_received),
1458 MIBSTAT(jabber_received),
1459 MIBSTAT(mac_receive_error),
1460 MIBSTAT(bad_crc_event),
1461 MIBSTAT(collision),
1462 MIBSTAT(late_collision),
302476c9
PZ
1463 MIBSTAT(rx_discard),
1464 MIBSTAT(rx_overrun),
c9df406f
LB
1465};
1466

static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
			     struct ethtool_cmd *cmd)
{
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
				 struct ethtool_cmd *cmd)
{
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		ethtool_cmd_speed_set(cmd, SPEED_10);
		break;
	case PORT_SPEED_100:
		ethtool_cmd_speed_set(cmd, SPEED_100);
		break;
	case PORT_SPEED_1000:
		ethtool_cmd_speed_set(cmd, SPEED_1000);
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static void
mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	wol->supported = 0;
	wol->wolopts = 0;
	if (mp->phy)
		phy_ethtool_get_wol(mp->phy, wol);
}

static int
mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	if (mp->phy == NULL)
		return -EOPNOTSUPP;

	err = phy_ethtool_set_wol(mp->phy, wol);
	/* Given that mv643xx_eth works without the marvell-specific PHY driver,
	 * this debugging hint is useful to have.
	 */
	if (err == -EOPNOTSUPP)
		netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
	return err;
}

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return mv643xx_eth_get_settings_phy(mp, cmd);
	else
		return mv643xx_eth_get_settings_phyless(mp, cmd);
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int ret;

	if (mp->phy == NULL)
		return -EINVAL;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	ret = phy_ethtool_sset(mp->phy, cmd);
	if (!ret)
		mv643xx_eth_adjust_link(dev);
	return ret;
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mv643xx_eth_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	return genphy_restart_aneg(mp->phy);
}

static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	ec->rx_coalesce_usecs = get_rx_coal(mp);
	ec->tx_coalesce_usecs = get_tx_coal(mp);

	return 0;
}

static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	set_rx_coal(mp, ec->rx_coalesce_usecs);
	set_tx_coal(mp, ec->tx_coalesce_usecs);

	return 0;
}

static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	er->rx_max_pending = 4096;
	er->tx_max_pending = 4096;

	er->rx_pending = mp->rx_ring_size;
	er->tx_pending = mp->tx_ring_size;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (er->rx_mini_pending || er->rx_jumbo_pending)
		return -EINVAL;

	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
	mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
				   MV643XX_MAX_SKB_DESCS * 2, 4096);
	if (mp->tx_ring_size != er->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    mp->tx_ring_size, er->tx_pending);

	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			netdev_err(dev,
				   "fatal error on re-opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static int
mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	bool rx_csum = features & NETIF_F_RXCSUM;

	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

	return 0;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       mv643xx_eth_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= mv643xx_eth_get_coalesce,
	.set_coalesce		= mv643xx_eth_set_coalesce,
	.get_ringparam		= mv643xx_eth_get_ringparam,
	.set_ringparam		= mv643xx_eth_set_ringparam,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= mv643xx_eth_get_wol,
	.set_wol		= mv643xx_eth_set_wol,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
	     (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	netdev_for_each_uc_addr(ha, dev) {
		if (memcmp(dev->dev_addr, ha->addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (ha->addr[5] & 0x0f);
	}

	return nibbles;
}
1783
static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		nibbles = 0xffff;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	wrlp(mp, PORT_CONFIG, port_config);
}

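/*
 * 8-bit CRC (polynomial 0x107, i.e. x^8 + x^2 + x + 1) over all six
 * address bytes, used as the index into the 256-entry "other"
 * multicast hash table.
 */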
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

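/*
 * Multicast addresses of the form 01:00:5e:00:00:xx are matched
 * exactly via the special multicast table; all other addresses are
 * hashed by addr_crc() into the "other" multicast table.  If the
 * interface is multicast-promiscuous, or the temporary tables cannot
 * be allocated, both tables are programmed to accept everything.
 */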
static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_ATOMIC);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	netdev_for_each_mc_addr(ha, dev) {
		u8 *a = ha->addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}


/* rx/tx queue initialisation ***********************************************/
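/*
 * Allocate and initialise a receive descriptor ring.  Queue 0 may
 * live in on-chip SRAM if the descriptors fit; all other queues use
 * coherent DMA memory.  The descriptors are linked into a circular
 * list through their next_desc_ptr fields.
 */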
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
					    mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &rxq->rx_desc_dma,
						       GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
			      GFP_KERNEL);
	if (rxq->rx_skb == NULL)
		goto out_free;

	rx_desc = rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}

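/*
 * Allocate and initialise a transmit descriptor ring, the
 * per-descriptor mapping bookkeeping array, and the DMA buffers used
 * for TSO headers.  As on the rx side, queue 0 may be placed in
 * on-chip SRAM.
 */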
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int ret;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the number of free entries
	 * drops to the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
					    mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
				       GFP_KERNEL);
	if (!txq->tx_desc_mapping) {
		ret = -ENOMEM;
		goto err_free_desc_area;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
					   txq->tx_ring_size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_dma, GFP_KERNEL);
	if (txq->tso_hdrs == NULL) {
		ret = -ENOMEM;
		goto err_free_desc_mapping;
	}
	skb_queue_head_init(&txq->tx_skb);

	return 0;

err_free_desc_mapping:
	kfree(txq->tx_desc_mapping);
err_free_desc_area:
	if (index == 0 && size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
	return ret;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
	kfree(txq->tx_desc_mapping);

	if (txq->tso_hdrs)
		dma_free_coherent(mp->dev->dev.parent,
				  txq->tx_ring_size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_dma);
}


/* netdev ops and related ***************************************************/
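/*
 * Read and acknowledge the interrupt cause registers, translating
 * the cause bits into mp->work_* flags for the NAPI poller.  Returns
 * nonzero if any event was collected.
 */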
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause &= ~INT_EXT;
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
	}

	if (int_cause) {
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			netdev_info(dev, "link down\n");

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
		    speed, duplex ? "full" : "half", fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

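/*
 * NAPI poll handler.  Work is dispatched one queue at a time,
 * highest-numbered queue first, in slices of at most 16 descriptors,
 * until either the budget is spent or no queue has work left; in the
 * latter case interrupts are re-enabled.
 */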
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	if (unlikely(mp->oom)) {
		mp->oom = 0;
		del_timer(&mp->rx_oom);
	}

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			work_done++;
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
		if (likely(!mp->oom))
			queue_mask |= mp->work_rx_refill;

		if (!queue_mask) {
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrlp(mp, INT_MASK, mp->int_mask);
	}

	return work_done;
}

static inline void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}

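/*
 * Bring the port up: (re)initialise the PHY if there is one, enable
 * the serial port, configure the tx rate limits and queue
 * priorities, program the unicast filter, and enable the rx queues.
 */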
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_init_hw(mp->phy);
		mv643xx_eth_set_settings(mp->dev, &cmd);
		phy_start(mp->phy);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	mv643xx_eth_set_features(mp->dev, mp->dev->features);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = mp->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	mp->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary. If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	mp->skb_size += SKB_DMA_REALIGN;
}

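/*
 * Open the device: request the interrupt, initialise all rx and tx
 * queues, pre-fill the rx rings, start the port, and unmask the
 * interrupt sources of interest.
 */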
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrlp(mp, INT_CAUSE, 0);
	wrlp(mp, INT_CAUSE_EXT, 0);
	rdlp(mp, INT_CAUSE_EXT);

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err) {
		netdev_err(dev, "can't assign irq\n");
		return -EAGAIN;
	}

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

	mp->int_mask = INT_EXT;

	for (i = 0; i < mp->rxq_count; i++) {
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i, INT_MAX);
		mp->int_mask |= INT_RX_0 << i;
	}

	if (mp->oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
	}

	for (i = 0; i < mp->txq_count; i++) {
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				txq_deinit(mp->txq + i);
			goto out_free;
		}
		mp->int_mask |= INT_TX_END_0 << i;
	}

	add_timer(&mp->mib_counters_timer);
	port_start(mp);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
	wrlp(mp, INT_MASK, mp->int_mask);

	return 0;


out_free:
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}

static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE |
		  DO_NOT_FORCE_LINK_FAIL |
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);
	if (mp->phy)
		phy_stop(mp->phy);
	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int ret;

	if (mp->phy == NULL)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(mp->phy, ifr, cmd);
	if (!ret)
		mv643xx_eth_adjust_link(dev);
	return ret;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.  Note that the re-open may fail if
	 * memory is exhausted.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		netdev_err(dev,
			   "fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);
		port_reset(mp);
		port_start(mp);
		netif_tx_wake_all_queues(mp->dev);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, mp->int_mask);
}
#endif


/* platform glue ************************************************************/
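/*
 * Program the MBUS address decoding windows so that the ethernet
 * unit can reach each DRAM chip select described by the passed-in
 * mbus_dram_target_info, and record the window protection bits for
 * later use in the per-port WINDOW_PROTECT registers.
 */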
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      const struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id mv643xx_eth_shared_ids[] = {
	{ .compatible = "marvell,orion-eth", },
	{ .compatible = "marvell,kirkwood-eth", },
	{ }
};
MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
#endif

#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
#define mv643xx_eth_property(_np, _name, _v)				\
	do {								\
		u32 tmp;						\
		if (!of_property_read_u32(_np, "marvell," _name, &tmp))	\
			_v = tmp;					\
	} while (0)

static struct platform_device *port_platdev[3];

static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
					  struct device_node *pnp)
{
	struct platform_device *ppdev;
	struct mv643xx_eth_platform_data ppd;
	struct resource res;
	const char *mac_addr;
	int ret;
	int dev_num = 0;

	memset(&ppd, 0, sizeof(ppd));
	ppd.shared = pdev;

	memset(&res, 0, sizeof(res));
	if (!of_irq_to_resource(pnp, 0, &res)) {
		dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
		return -EINVAL;
	}

	if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
		dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
		return -EINVAL;
	}

	if (ppd.port_number >= 3) {
		dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
		return -EINVAL;
	}

	while (dev_num < 3 && port_platdev[dev_num])
		dev_num++;

	if (dev_num == 3) {
		dev_err(&pdev->dev, "too many ports registered\n");
		return -EINVAL;
	}

	mac_addr = of_get_mac_address(pnp);
	if (mac_addr)
		memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);

	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
	mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
	mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
	mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
	mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);

	ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
	if (!ppd.phy_node) {
		ppd.phy_addr = MV643XX_ETH_PHY_NONE;
		of_property_read_u32(pnp, "speed", &ppd.speed);
		of_property_read_u32(pnp, "duplex", &ppd.duplex);
	}

	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
	if (!ppdev)
		return -ENOMEM;
	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	ppdev->dev.of_node = pnp;

	ret = platform_device_add_resources(ppdev, &res, 1);
	if (ret)
		goto port_err;

	ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
	if (ret)
		goto port_err;

	ret = platform_device_add(ppdev);
	if (ret)
		goto port_err;

	port_platdev[dev_num] = ppdev;

	return 0;

port_err:
	platform_device_put(ppdev);
	return ret;
}

static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_platform_data *pd;
	struct device_node *pnp, *np = pdev->dev.of_node;
	int ret;

	/* bail out if not registered from DT */
	if (!np)
		return 0;

	pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;
	pdev->dev.platform_data = pd;

	mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);

	for_each_available_child_of_node(np, pnp) {
		ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
		if (ret)
			return ret;
	}
	return 0;
}

static void mv643xx_eth_shared_of_remove(void)
{
	int n;

	for (n = 0; n < 3; n++) {
		platform_device_del(port_platdev[n]);
		port_platdev[n] = NULL;
	}
}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
	return 0;
}

static inline void mv643xx_eth_shared_of_remove(void)
{
}
#endif

static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed;
	struct mv643xx_eth_shared_platform_data *pd;
	struct mv643xx_eth_shared_private *msp;
	const struct mbus_dram_target_info *dram;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
			  mv643xx_eth_driver_version);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		return -ENOMEM;
	platform_set_drvdata(pdev, msp);

	msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (msp->base == NULL)
		return -ENOMEM;

	msp->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(msp->clk))
		clk_prepare_enable(msp->clk);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv643xx_eth_conf_mbus_windows(msp, dram);

	ret = mv643xx_eth_shared_of_probe(pdev);
	if (ret)
		return ret;
	pd = dev_get_platdata(&pdev->dev);

	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
					pd->tx_csum_limit : 9 * 1024;
	infer_hw_params(msp);

	return 0;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	mv643xx_eth_shared_of_remove();
	if (!IS_ERR(msp->clk))
		clk_disable_unprepare(msp->clk);
	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
	},
};

static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;
	unsigned int tx_ring_size;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
	else
		uc_addr_get(mp, dev->dev_addr);

	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->rxq_count = pd->rx_queue_count ? : 1;

	tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		tx_ring_size = pd->tx_queue_size;

	mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
				   MV643XX_MAX_SKB_DESCS * 2, 4096);
	if (mp->tx_ring_size != tx_ring_size)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    mp->tx_ring_size, tx_ring_size);

	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}

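/*
 * Probe for a PHY on the orion-mdio bus.  With the default PHY
 * address, all 32 addresses are tried, starting from the one
 * currently programmed in the PHY_ADDR register; otherwise only the
 * given address is probed.
 */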
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct phy_device *phydev;
	int start;
	int num;
	int i;
	char phy_id[MII_BUS_ID_SIZE + 3];

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	/* Attempt to connect to the PHY using orion-mdio */
	phydev = ERR_PTR(-ENODEV);
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 "orion-mdio-mii", addr);

		phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
				     PHY_INTERFACE_MODE_GMII);
		if (!IS_ERR(phydev)) {
			phy_addr_set(mp, addr);
			break;
		}
	}

	return phydev;
}

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

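/*
 * Initialise the port serial control register.  When there is no
 * PHY, the requested speed and duplex are forced; otherwise they are
 * left to autonegotiation.
 */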
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static const struct net_device_ops mv643xx_eth_netdev_ops = {
	.ndo_open		= mv643xx_eth_open,
	.ndo_stop		= mv643xx_eth_stop,
	.ndo_start_xmit		= mv643xx_eth_xmit,
	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mv643xx_eth_ioctl,
	.ndo_change_mtu		= mv643xx_eth_change_mtu,
	.ndo_set_features	= mv643xx_eth_set_features,
	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
	.ndo_get_stats		= mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mv643xx_eth_netpoll,
#endif
};

static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	int err;

	pd = dev_get_platdata(&pdev->dev);
	if (pd == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	/* Kirkwood resets some registers on gated clocks. Especially
	 * CLK125_BYPASS_EN must be cleared but is not available on
	 * all other SoCs/System Controllers using this driver.
	 */
	if (of_device_is_compatible(pdev->dev.of_node,
				    "marvell,kirkwood-eth-port"))
		wrlp(mp, PORT_SERIAL_CONTROL1,
		     rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);

	/*
	 * Start with a default rate, and if there is a clock, allow
	 * it to override the default.
	 */
	mp->t_clk = 133000000;
	mp->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(mp->clk)) {
		clk_prepare_enable(mp->clk);
		mp->t_clk = clk_get_rate(mp->clk);
	} else if (!IS_ERR(mp->shared->clk)) {
		mp->t_clk = clk_get_rate(mp->shared->clk);
	}

	set_params(mp, pd);
	netif_set_real_num_tx_queues(dev, mp->txq_count);
	netif_set_real_num_rx_queues(dev, mp->rxq_count);

	err = 0;
	if (pd->phy_node) {
		mp->phy = of_phy_connect(mp->dev, pd->phy_node,
					 mv643xx_eth_adjust_link, 0,
					 PHY_INTERFACE_MODE_GMII);
		if (!mp->phy)
			err = -ENODEV;
		else
			phy_addr_set(mp, mp->phy->addr);
	} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
		mp->phy = phy_scan(mp, pd->phy_addr);

		if (IS_ERR(mp->phy))
			err = PTR_ERR(mp->phy);
		else
			phy_init(mp, pd->speed, pd->duplex);
	}
	if (err == -ENODEV) {
		err = -EPROBE_DEFER;
		goto out;
	}
	if (err)
		goto out;

	dev->ethtool_ops = &mv643xx_eth_ethtool_ops;

	init_pscr(mp, pd->speed, pd->duplex);

	mib_counters_clear(mp);

	init_timer(&mp->mib_counters_timer);
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);

	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->netdev_ops = &mv643xx_eth_netdev_ops;

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->vlan_features = dev->features;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features = dev->features;

	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	netif_carrier_off(dev);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	set_rx_coal(mp, 250);
	set_tx_coal(mp, 0);

	err = register_netdev(dev);
	if (err)
		goto out;

	netdev_notice(dev, "port %d with MAC address %pM\n",
		      mp->port_num, dev->dev_addr);

	if (mp->tx_desc_sram_size > 0)
		netdev_notice(dev, "configured with sram\n");

	return 0;

out:
	if (!IS_ERR(mp->clk))
		clk_disable_unprepare(mp->clk);
	free_netdev(dev);

	return err;
}

static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_disconnect(mp->phy);
	cancel_work_sync(&mp->tx_timeout_task);

	if (!IS_ERR(mp->clk))
		clk_disable_unprepare(mp->clk);

	free_netdev(mp->dev);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);