/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port		0x00		0x10		0x20
 * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64 /* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536 /* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31

/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN


/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17


#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct resource *mem_res;
	struct eth_regs __iomem *regs;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct phy_device *phydev;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};


#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;


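/* Issue a single MDIO read or write through the shared MDIO command/status
 * registers of the NPE-B MAC and busy-wait for the GO bit to clear */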
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII %s failed\n", bus->name,
		       phy_id, write ? "write" : "read");
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_register(void)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	/* All MII PHY accesses use NPE-B Ethernet registers */
	spin_lock_init(&mdio_lock);
	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);

	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	strcpy(mdio_bus->id, "0");

	if ((err = mdiobus_register(mdio_bus)))
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}


static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
	       dev->name, port->speed, port->duplex ? "full" : "half");
}


static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

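/* Pop a descriptor's physical address from a hardware queue and translate it
 * to an index into this port's descriptor table; returns -1 if queue empty */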
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	BUG_ON(qmgr_stat_overflow(queue));
}


static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}


static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	netif_rx_schedule(dev, &port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
			       dev->name);
#endif
			netif_rx_complete(dev, napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    netif_rx_reschedule(dev, napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
510 | " netif_rx_reschedule successed\n", | |
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-free queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
				RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}


static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}

static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}


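/* Multicast filtering uses a single address/mask pair: program the bits all
 * requested multicast addresses have in common and mask out those that differ */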
static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct dev_mc_list *mclist = dev->mc_list;
	u8 diffs[ETH_ALEN], *addr;
	int cnt = dev->mc_count, i;

	if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	memset(diffs, 0, ETH_ALEN);
	addr = mclist->dmi_addr; /* first MAC address */

	while (--cnt && (mclist = mclist->next))
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ mclist->dmi_addr[i];

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}


static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct port *port = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;
	return phy_mii_ioctl(port->phydev, if_mii(req), cmd);
}

/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strcpy(info->bus_info, "internal");
}

static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port *port = netdev_priv(dev);
	return phy_ethtool_gset(port->phydev, cmd);
}

static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port *port = netdev_priv(dev);
	return phy_ethtool_sset(port->phydev, cmd);
}

static int ixp4xx_nway_reset(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	return phy_start_aneg(port->phydev);
}

static struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.get_settings = ixp4xx_get_settings,
	.set_settings = ixp4xx_set_settings,
	.nway_reset = ixp4xx_nway_reset,
	.get_link = ethtool_op_get_link,
};


static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

static int init_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(port->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	netif_rx_schedule(dev, &port->napi);
	return 0;
}

static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}

static int __devinit eth_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	struct eth_plat_info *plat = pdev->dev.platform_data;
	u32 regs_phys;
	char phy_id[BUS_ID_SIZE];
	int err;

	if (!(dev = alloc_etherdev(sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	port = netdev_priv(dev);
	port->netdev = dev;
	port->id = pdev->id;

	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
		regs_phys = IXP4XX_EthA_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEB:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
		regs_phys = IXP4XX_EthB_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEC:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
		regs_phys = IXP4XX_EthC_BASE_PHYS;
		break;
	default:
		err = -ENOSYS;
		goto err_free;
	}

	dev->open = eth_open;
	dev->hard_start_xmit = eth_xmit;
	dev->stop = eth_close;
	dev->do_ioctl = eth_ioctl;
	dev->ethtool_ops = &ixp4xx_ethtool_ops;
	dev->set_multicast_list = eth_set_mcast_list;
	dev->tx_queue_len = 100;

	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
		err = -EIO;
		goto err_free;
	}

	if (register_netdev(dev)) {
		err = -EIO;
		goto err_npe_rel;
	}

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_unreg;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, dev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);
	if (IS_ERR(port->phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(port->phydev);
	}

	port->phydev->irq = PHY_POLL;

	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
	       npe_name(port->npe));

	return 0;

err_unreg:
	unregister_netdev(dev);
err_npe_rel:
	npe_release(port->npe);
err_free:
	free_netdev(dev);
	return err;
}

static int __devexit eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct port *port = netdev_priv(dev);

	unregister_netdev(dev);
	npe_port_tab[NPE_ID(port->id)] = NULL;
	platform_set_drvdata(pdev, NULL);
	npe_release(port->npe);
	release_resource(port->mem_res);
	free_netdev(dev);
	return 0;
}

static struct platform_driver ixp4xx_eth_driver = {
	.driver.name	= DRV_NAME,
	.probe		= eth_init_one,
	.remove		= eth_remove_one,
};

static int __init eth_init_module(void)
{
	int err;
	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
		return -ENOSYS;

	if ((err = ixp4xx_mdio_register()))
		return err;
	return platform_driver_register(&ixp4xx_eth_driver);
}

static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);
	ixp4xx_mdio_remove();
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
module_init(eth_init_module);
module_exit(eth_cleanup_module);