/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
		/* Physical address of the buffer */
		u64 addr:40;
#else
		u64 addr:40;
		u64 code:7;
		u64 tstamp:1;
		u64 len:14;
		u64 reserved_62_63:2;
#endif
	} s;
};

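/* Both the RX and TX rings are arrays of the 8-byte descriptors
 * defined above; software fills in entries and the MIX DMA engine
 * consumes them.  The register offsets below are relative to the
 * per-port MIX and AGL base addresses (p->mix, p->agl) that are
 * ioremapped in octeon_mgmt_probe().
 */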
#define MIX_ORING1	0x0
#define MIX_ORING2	0x8
#define MIX_IRING1	0x10
#define MIX_IRING2	0x18
#define MIX_CTL		0x20
#define MIX_IRHWM	0x28
#define MIX_IRCNT	0x30
#define MIX_ORHWM	0x38
#define MIX_ORCNT	0x40
#define MIX_ISR		0x48
#define MIX_INTENA	0x50
#define MIX_REMCNT	0x58
#define MIX_BIST	0x78

#define AGL_GMX_PRT_CFG			0x10
#define AGL_GMX_RX_FRM_CTL		0x18
#define AGL_GMX_RX_FRM_MAX		0x30
#define AGL_GMX_RX_JABBER		0x38
#define AGL_GMX_RX_STATS_CTL		0x50

#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0

#define AGL_GMX_RX_ADR_CTL		0x100
#define AGL_GMX_RX_ADR_CAM_EN		0x108
#define AGL_GMX_RX_ADR_CAM0		0x180
#define AGL_GMX_RX_ADR_CAM1		0x188
#define AGL_GMX_RX_ADR_CAM2		0x190
#define AGL_GMX_RX_ADR_CAM3		0x198
#define AGL_GMX_RX_ADR_CAM4		0x1a0
#define AGL_GMX_RX_ADR_CAM5		0x1a8

#define AGL_GMX_TX_CLK			0x208
#define AGL_GMX_TX_STATS_CTL		0x268
#define AGL_GMX_TX_CTL			0x270
#define AGL_GMX_TX_STAT0		0x280
#define AGL_GMX_TX_STAT1		0x288
#define AGL_GMX_TX_STAT2		0x290
#define AGL_GMX_TX_STAT3		0x298
#define AGL_GMX_TX_STAT4		0x2a0
#define AGL_GMX_TX_STAT5		0x2a8
#define AGL_GMX_TX_STAT6		0x2b0
#define AGL_GMX_TX_STAT7		0x2b8
#define AGL_GMX_TX_STAT8		0x2c0
#define AGL_GMX_TX_STAT9		0x2c8

struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	u64 agl_prt_ctl;
	int port;
	int irq;
	bool has_rx_tstamp;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

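/* Note: ring_max_fill() leaves eight entries of slack so the ring is
 * never completely full; presumably this keeps the hardware producer
 * index from catching up with the software consumer index when the
 * ring wraps.
 */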
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

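/* Refill the RX ring: allocate an skb sized for the current MTU for
 * each empty slot, map it for DMA, write its descriptor, and ring the
 * MIX_IRING2 doorbell to tell the hardware one more buffer is
 * available.
 */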
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell.  */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

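/* Reclaim completed TX descriptors.  MIX_ORCNT holds the number of
 * packets the hardware has finished sending; each one is acknowledged
 * by writing 1 back to MIX_ORCNT.  If the packet requested a hardware
 * timestamp, it is popped from the MIX timestamp FIFO here before the
 * skb is freed.
 */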
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			u64 ns;

			memset(&ts, 0, sizeof(ts));
			/* Read the timestamp */
			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers.  */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers.  */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet.  */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

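/* The AGL RX CAM holds up to eight 6-byte MAC addresses.  Register
 * CAMn collects byte n of every address, one byte lane per CAM entry;
 * cam_mask enables the entries that are in use.
 */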
struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;     /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int r = eth_mac_addr(netdev, addr);

	if (r)
		return r;

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/* Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

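/* IRQ handler: acknowledge MIX_ISR, then hand RX threshold interrupts
 * to NAPI and TX threshold interrupts to the cleanup tasklet, with the
 * corresponding interrupt source masked until the deferred work has
 * run.
 */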
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the status of hardware for timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
				(NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
				ptp.s.ext_clk_in,
				(NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
	default:
		if (p->phydev)
			return phy_mii_ioctl(p->phydev, rq, cmd);
		return -EINVAL;
	}
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int i;
		/* Poll for up to 10 ms for the port to go idle.  (The
		 * original loop also incremented i in its body, which
		 * halved the intended wait; the stray increment is
		 * dropped here.)
		 */
		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
				break;
			mdelay(1);
		}
	}
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state only if link is set */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

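/* Program the GMX port configuration to match the PHY.  On CN6XXX the
 * speed encoding is speed_msb:speed = 1:0 for 10 Mbps, 0:0 for
 * 100 Mbps, and 0:1 for 1000 Mbps; in RGMII mode the TX clock divider
 * is also adjusted for the lower speeds.
 */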
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (!p->phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = p->phydev->duplex;

	switch (p->phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = p->phydev->duplex;
		}
		break;
	case 0: /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (p->phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (p->phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}

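/* PHY link-change callback, registered via of_phy_connect() in
 * octeon_mgmt_init_phy().  Reprograms GMX with the link disabled and
 * logs the new state after dropping p->lock.
 */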
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	int link_changed = 0;

	if (!p->phydev)
		return;

	spin_lock_irqsave(&p->lock, flags);

	if (!p->phydev->link && p->last_link)
		link_changed = -1;

	if (p->phydev->link
	    && (p->last_duplex != p->phydev->duplex
		|| p->last_link != p->phydev->link
		|| p->last_speed != p->phydev->speed)) {
		octeon_mgmt_disable_link(p);
		link_changed = 1;
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	}

	p->last_link = p->phydev->link;
	p->last_speed = p->phydev->speed;
	p->last_duplex = p->phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
				"Full" : "Half");
		} else {
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	p->phydev = of_phy_connect(netdev, p->phy_np,
				   octeon_mgmt_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);

	if (!p->phydev)
		return -ENODEV;

	return 0;
}

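/* ndo_open: allocate and map both descriptor rings, reset the MIX/AGL
 * blocks, program the ring base registers, attach the PHY, set the
 * RGMII/MII mode on CN6XXX, and finally enable interrupts and NAPI.
 */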
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW.  Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (p->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clock counts are based on the 125 MHz
		 * reference, which has an 8 ns period.  So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock.  External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* The default interframe gaps are too small.  The
		 * recommended workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics; the counters are clear-on-read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (p->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator.  The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (p->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(p->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);
	p->phydev = NULL;

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

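/* ndo_start_xmit: map the skb, append it to tx_list, write its
 * descriptor into the TX ring, and ring the MIX_ORING2 doorbell.
 * Completion and freeing happen later in
 * octeon_mgmt_clean_tx_buffers().
 */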
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring.  */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell.  */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netif_trans_update(netdev);
	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EOPNOTSUPP;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EOPNOTSUPP;
}

static int octeon_mgmt_nway_reset(struct net_device *dev)
{
	struct octeon_mgmt *p = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_start_aneg(p->phydev);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings,
	.nway_reset = octeon_mgmt_nway_reset,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open =		octeon_mgmt_open,
	.ndo_stop =		octeon_mgmt_stop,
	.ndo_start_xmit =	octeon_mgmt_xmit,
	.ndo_set_rx_mode =	octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address =	octeon_mgmt_set_mac_address,
	.ndo_do_ioctl =		octeon_mgmt_ioctl,
	.ndo_change_mtu =	octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =	octeon_mgmt_poll_controller,
#endif
};

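/* Platform probe.  The device tree supplies the port number
 * ("cell-index"), the MIX, AGL and AGL_PRT_CTL register windows as
 * 'reg' resources, the IRQ, and optionally a fixed MAC address and a
 * "phy-handle" phandle.
 */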
static int octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	struct resource *res_agl_prt_ctl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	platform_set_drvdata(pdev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;
	p->has_rx_tstamp = false;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (res_agl_prt_ctl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);
	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);

	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);
		goto err;
	}

	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
					   p->agl_prt_ctl_size);
	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(netdev);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (result)
		goto err;

	netif_carrier_off(netdev);
	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	free_netdev(netdev);
	return result;
}

static int octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static const struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name		= "octeon_mgmt",
		.of_match_table = octeon_mgmt_match,
	},
	.probe		= octeon_mgmt_probe,
	.remove		= octeon_mgmt_remove,
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);