/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
        "Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
        u64 d64;
        struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
                u64 reserved_62_63:2;
                /* Length of the buffer/packet in bytes */
                u64 len:14;
                /* For TX, signals that the packet should be timestamped */
                u64 tstamp:1;
                /* The RX error code */
                u64 code:7;
                /* Physical address of the buffer */
                u64 addr:40;
#else
                u64 addr:40;
                u64 code:7;
                u64 tstamp:1;
                u64 len:14;
                u64 reserved_62_63:2;
#endif
        } s;
};

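/* Register offsets below are relative to the per-port MIX and AGL CSR
 * windows that octeon_mgmt_probe() maps into octeon_mgmt.mix and
 * octeon_mgmt.agl respectively.
 */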
#define MIX_ORING1 0x0
#define MIX_ORING2 0x8
#define MIX_IRING1 0x10
#define MIX_IRING2 0x18
#define MIX_CTL 0x20
#define MIX_IRHWM 0x28
#define MIX_IRCNT 0x30
#define MIX_ORHWM 0x38
#define MIX_ORCNT 0x40
#define MIX_ISR 0x48
#define MIX_INTENA 0x50
#define MIX_REMCNT 0x58
#define MIX_BIST 0x78

#define AGL_GMX_PRT_CFG 0x10
#define AGL_GMX_RX_FRM_CTL 0x18
#define AGL_GMX_RX_FRM_MAX 0x30
#define AGL_GMX_RX_JABBER 0x38
#define AGL_GMX_RX_STATS_CTL 0x50

#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0

#define AGL_GMX_RX_ADR_CTL 0x100
#define AGL_GMX_RX_ADR_CAM_EN 0x108
#define AGL_GMX_RX_ADR_CAM0 0x180
#define AGL_GMX_RX_ADR_CAM1 0x188
#define AGL_GMX_RX_ADR_CAM2 0x190
#define AGL_GMX_RX_ADR_CAM3 0x198
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8

#define AGL_GMX_TX_CLK 0x208
#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
#define AGL_GMX_TX_STAT1 0x288
#define AGL_GMX_TX_STAT2 0x290
#define AGL_GMX_TX_STAT3 0x298
#define AGL_GMX_TX_STAT4 0x2a0
#define AGL_GMX_TX_STAT5 0x2a8
#define AGL_GMX_TX_STAT6 0x2b0
#define AGL_GMX_TX_STAT7 0x2b8
#define AGL_GMX_TX_STAT8 0x2c0
#define AGL_GMX_TX_STAT9 0x2c8

struct octeon_mgmt {
        struct net_device *netdev;
        u64 mix;
        u64 agl;
        u64 agl_prt_ctl;
        int port;
        int irq;
        bool has_rx_tstamp;
        u64 *tx_ring;
        dma_addr_t tx_ring_handle;
        unsigned int tx_next;
        unsigned int tx_next_clean;
        unsigned int tx_current_fill;
        /* The tx_list lock also protects the ring related variables */
        struct sk_buff_head tx_list;

        /* RX variables only touched in napi_poll. No locking necessary. */
        u64 *rx_ring;
        dma_addr_t rx_ring_handle;
        unsigned int rx_next;
        unsigned int rx_next_fill;
        unsigned int rx_current_fill;
        struct sk_buff_head rx_list;

        spinlock_t lock;
        unsigned int last_duplex;
        unsigned int last_link;
        unsigned int last_speed;
        struct device *dev;
        struct napi_struct napi;
        struct tasklet_struct tx_clean_tasklet;
        struct device_node *phy_np;
        resource_size_t mix_phys;
        resource_size_t mix_size;
        resource_size_t agl_phys;
        resource_size_t agl_size;
        resource_size_t agl_prt_ctl_phys;
        resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
        mix_intena.s.ithena = enable ? 1 : 0;
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
        mix_intena.s.othena = enable ? 1 : 0;
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 0);
}

static unsigned int ring_max_fill(unsigned int ring_size)
{
        return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
        return ring_size * sizeof(union mgmt_port_ring_entry);
}

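/* Refill the RX ring: allocate an skb for each free slot, hand its DMA
 * address to the hardware through a ring entry, and advertise every new
 * buffer by writing 1 to MIX_IRING2 (the RX ring "doorbell").
 */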
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
                unsigned int size;
                union mgmt_port_ring_entry re;
                struct sk_buff *skb;

                /* CN56XX pass 1 needs 8 bytes of padding. */
                size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

                skb = netdev_alloc_skb(netdev, size);
                if (!skb)
                        break;
                skb_reserve(skb, NET_IP_ALIGN);
                __skb_queue_tail(&p->rx_list, skb);

                re.d64 = 0;
                re.s.len = size;
                re.s.addr = dma_map_single(p->dev, skb->data,
                                           size,
                                           DMA_FROM_DEVICE);

                /* Put it in the ring. */
                p->rx_ring[p->rx_next_fill] = re.d64;
                dma_sync_single_for_device(p->dev, p->rx_ring_handle,
                                           ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                           DMA_BIDIRECTIONAL);
                p->rx_next_fill =
                        (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
                p->rx_current_fill++;
                /* Ring the bell. */
                cvmx_write_csr(p->mix + MIX_IRING2, 1);
        }
}

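/* Reclaim completed TX buffers. MIX_ORCNT reports how many entries the
 * hardware has finished transmitting; each completed skb is unmapped,
 * optionally timestamped, and freed, and the count is acknowledged back
 * to the hardware one entry at a time.
 */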
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
        union cvmx_mixx_orcnt mix_orcnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        int cleaned = 0;
        unsigned long flags;

        mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        while (mix_orcnt.s.orcnt) {
                spin_lock_irqsave(&p->tx_list.lock, flags);

                mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

                if (mix_orcnt.s.orcnt == 0) {
                        spin_unlock_irqrestore(&p->tx_list.lock, flags);
                        break;
                }

                dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                        ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                        DMA_BIDIRECTIONAL);

                re.d64 = p->tx_ring[p->tx_next_clean];
                p->tx_next_clean =
                        (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
                skb = __skb_dequeue(&p->tx_list);

                mix_orcnt.u64 = 0;
                mix_orcnt.s.orcnt = 1;

                /* Acknowledge to hardware that we have the buffer. */
                cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
                p->tx_current_fill--;

                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);

                /* Read the hardware TX timestamp if one was recorded */
                if (unlikely(re.s.tstamp)) {
                        struct skb_shared_hwtstamps ts;
                        u64 ns;

                        memset(&ts, 0, sizeof(ts));
                        /* Read the timestamp */
                        ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
                        /* Remove the timestamp from the FIFO */
                        cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
                        /* Tell the kernel about the timestamp */
                        ts.hwtstamp = ns_to_ktime(ns);
                        skb_tstamp_tx(skb, &ts);
                }

                dev_kfree_skb_any(skb);
                cleaned++;

                mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        }

        if (cleaned && netif_queue_stopped(p->netdev))
                netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
        struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        unsigned long flags;
        u64 drop, bad;

        /* These reads also clear the count registers. */
        drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
        bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

        if (drop || bad) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.rx_errors += bad;
                netdev->stats.rx_dropped += drop;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        unsigned long flags;

        union cvmx_agl_gmx_txx_stat0 s0;
        union cvmx_agl_gmx_txx_stat1 s1;

        /* These reads also clear the count registers. */
        s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
        s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

        if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
                netdev->stats.collisions += s1.s.scol + s1.s.mcol;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
                                         struct sk_buff **pskb)
{
        union mgmt_port_ring_entry re;

        dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                DMA_BIDIRECTIONAL);

        re.d64 = p->rx_ring[p->rx_next];
        p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill--;
        *pskb = __skb_dequeue(&p->rx_list);

        dma_unmap_single(p->dev, re.s.addr,
                         ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
                         DMA_FROM_DEVICE);

        return re.d64;
}


387 | static int octeon_mgmt_receive_one(struct octeon_mgmt *p) | |
388 | { | |
d6aa60a1 DD |
389 | struct net_device *netdev = p->netdev; |
390 | union cvmx_mixx_ircnt mix_ircnt; | |
391 | union mgmt_port_ring_entry re; | |
392 | struct sk_buff *skb; | |
393 | struct sk_buff *skb2; | |
394 | struct sk_buff *skb_new; | |
395 | union mgmt_port_ring_entry re2; | |
396 | int rc = 1; | |
397 | ||
398 | ||
399 | re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); | |
400 | if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { | |
401 | /* A good packet, send it up. */ | |
402 | skb_put(skb, re.s.len); | |
403 | good: | |
3d305850 CR |
404 | /* Process the RX timestamp if it was recorded */ |
405 | if (p->has_rx_tstamp) { | |
406 | /* The first 8 bytes are the timestamp */ | |
407 | u64 ns = *(u64 *)skb->data; | |
408 | struct skb_shared_hwtstamps *ts; | |
409 | ts = skb_hwtstamps(skb); | |
410 | ts->hwtstamp = ns_to_ktime(ns); | |
3d305850 CR |
411 | __skb_pull(skb, 8); |
412 | } | |
d6aa60a1 DD |
413 | skb->protocol = eth_type_trans(skb, netdev); |
414 | netdev->stats.rx_packets++; | |
415 | netdev->stats.rx_bytes += skb->len; | |
d6aa60a1 DD |
416 | netif_receive_skb(skb); |
417 | rc = 0; | |
418 | } else if (re.s.code == RING_ENTRY_CODE_MORE) { | |
a0ce9b1e | 419 | /* Packet split across skbs. This can happen if we |
d6aa60a1 DD |
420 | * increase the MTU. Buffers that are already in the |
421 | * rx ring can then end up being too small. As the rx | |
422 | * ring is refilled, buffers sized for the new MTU | |
423 | * will be used and we should go back to the normal | |
424 | * non-split case. | |
425 | */ | |
426 | skb_put(skb, re.s.len); | |
427 | do { | |
428 | re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); | |
429 | if (re2.s.code != RING_ENTRY_CODE_MORE | |
430 | && re2.s.code != RING_ENTRY_CODE_DONE) | |
431 | goto split_error; | |
432 | skb_put(skb2, re2.s.len); | |
433 | skb_new = skb_copy_expand(skb, 0, skb2->len, | |
434 | GFP_ATOMIC); | |
435 | if (!skb_new) | |
436 | goto split_error; | |
437 | if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new), | |
438 | skb2->len)) | |
439 | goto split_error; | |
440 | skb_put(skb_new, skb2->len); | |
441 | dev_kfree_skb_any(skb); | |
442 | dev_kfree_skb_any(skb2); | |
443 | skb = skb_new; | |
444 | } while (re2.s.code == RING_ENTRY_CODE_MORE); | |
445 | goto good; | |
446 | } else { | |
447 | /* Some other error, discard it. */ | |
448 | dev_kfree_skb_any(skb); | |
a0ce9b1e | 449 | /* Error statistics are accumulated in |
d6aa60a1 DD |
450 | * octeon_mgmt_update_rx_stats. |
451 | */ | |
452 | } | |
453 | goto done; | |
454 | split_error: | |
455 | /* Discard the whole mess. */ | |
456 | dev_kfree_skb_any(skb); | |
457 | dev_kfree_skb_any(skb2); | |
458 | while (re2.s.code == RING_ENTRY_CODE_MORE) { | |
459 | re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); | |
460 | dev_kfree_skb_any(skb2); | |
461 | } | |
462 | netdev->stats.rx_errors++; | |
463 | ||
464 | done: | |
465 | /* Tell the hardware we processed a packet. */ | |
466 | mix_ircnt.u64 = 0; | |
467 | mix_ircnt.s.ircnt = 1; | |
368bec0d | 468 | cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); |
d6aa60a1 | 469 | return rc; |
d6aa60a1 DD |
470 | } |
471 | ||
472 | static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) | |
473 | { | |
d6aa60a1 DD |
474 | unsigned int work_done = 0; |
475 | union cvmx_mixx_ircnt mix_ircnt; | |
476 | int rc; | |
477 | ||
368bec0d | 478 | mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); |
d6aa60a1 DD |
479 | while (work_done < budget && mix_ircnt.s.ircnt) { |
480 | ||
481 | rc = octeon_mgmt_receive_one(p); | |
482 | if (!rc) | |
483 | work_done++; | |
484 | ||
485 | /* Check for more packets. */ | |
368bec0d | 486 | mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); |
d6aa60a1 DD |
487 | } |
488 | ||
489 | octeon_mgmt_rx_fill_ring(p->netdev); | |
490 | ||
491 | return work_done; | |
492 | } | |
493 | ||
494 | static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) | |
495 | { | |
496 | struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); | |
497 | struct net_device *netdev = p->netdev; | |
498 | unsigned int work_done = 0; | |
499 | ||
500 | work_done = octeon_mgmt_receive_packets(p, budget); | |
501 | ||
502 | if (work_done < budget) { | |
503 | /* We stopped because no more packets were available. */ | |
504 | napi_complete(napi); | |
505 | octeon_mgmt_enable_rx_irq(p); | |
506 | } | |
507 | octeon_mgmt_update_rx_stats(netdev); | |
508 | ||
509 | return work_done; | |
510 | } | |
511 | ||
512 | /* Reset the hardware to clean state. */ | |
513 | static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) | |
514 | { | |
515 | union cvmx_mixx_ctl mix_ctl; | |
516 | union cvmx_mixx_bist mix_bist; | |
517 | union cvmx_agl_gmx_bist agl_gmx_bist; | |
518 | ||
519 | mix_ctl.u64 = 0; | |
368bec0d | 520 | cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); |
d6aa60a1 | 521 | do { |
368bec0d | 522 | mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); |
d6aa60a1 DD |
523 | } while (mix_ctl.s.busy); |
524 | mix_ctl.s.reset = 1; | |
368bec0d DD |
525 | cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); |
526 | cvmx_read_csr(p->mix + MIX_CTL); | |
eeae05aa | 527 | octeon_io_clk_delay(64); |
d6aa60a1 | 528 | |
368bec0d | 529 | mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); |
d6aa60a1 DD |
530 | if (mix_bist.u64) |
531 | dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", | |
532 | (unsigned long long)mix_bist.u64); | |
533 | ||
534 | agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST); | |
535 | if (agl_gmx_bist.u64) | |
536 | dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", | |
537 | (unsigned long long)agl_gmx_bist.u64); | |
538 | } | |
539 | ||
540 | struct octeon_mgmt_cam_state { | |
541 | u64 cam[6]; | |
542 | u64 cam_mask; | |
543 | int cam_index; | |
544 | }; | |
545 | ||
546 | static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, | |
547 | unsigned char *addr) | |
548 | { | |
549 | int i; | |
550 | ||
551 | for (i = 0; i < 6; i++) | |
552 | cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); | |
553 | cs->cam_mask |= (1ULL << cs->cam_index); | |
554 | cs->cam_index++; | |
555 | } | |
556 | ||
557 | static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) | |
558 | { | |
559 | struct octeon_mgmt *p = netdev_priv(netdev); | |
d6aa60a1 DD |
560 | union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; |
561 | union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; | |
562 | unsigned long flags; | |
563 | unsigned int prev_packet_enable; | |
564 | unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ | |
565 | unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ | |
566 | struct octeon_mgmt_cam_state cam_state; | |
22bedad3 | 567 | struct netdev_hw_addr *ha; |
d6aa60a1 DD |
568 | int available_cam_entries; |
569 | ||
570 | memset(&cam_state, 0, sizeof(cam_state)); | |
571 | ||
62538d24 | 572 | if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) { |
d6aa60a1 DD |
573 | cam_mode = 0; |
574 | available_cam_entries = 8; | |
575 | } else { | |
a0ce9b1e | 576 | /* One CAM entry for the primary address, leaves seven |
d6aa60a1 DD |
577 | * for the secondary addresses. |
578 | */ | |
62538d24 | 579 | available_cam_entries = 7 - netdev->uc.count; |
d6aa60a1 DD |
580 | } |
581 | ||
582 | if (netdev->flags & IFF_MULTICAST) { | |
4cd24eaf JP |
583 | if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || |
584 | netdev_mc_count(netdev) > available_cam_entries) | |
62538d24 | 585 | multicast_mode = 2; /* 2 - Accept all multicast. */ |
d6aa60a1 DD |
586 | else |
587 | multicast_mode = 0; /* 0 - Use CAM. */ | |
588 | } | |
589 | ||
590 | if (cam_mode == 1) { | |
591 | /* Add primary address. */ | |
592 | octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); | |
62538d24 DD |
593 | netdev_for_each_uc_addr(ha, netdev) |
594 | octeon_mgmt_cam_state_add(&cam_state, ha->addr); | |
d6aa60a1 DD |
595 | } |
596 | if (multicast_mode == 0) { | |
22bedad3 JP |
597 | netdev_for_each_mc_addr(ha, netdev) |
598 | octeon_mgmt_cam_state_add(&cam_state, ha->addr); | |
d6aa60a1 DD |
599 | } |
600 | ||
d6aa60a1 DD |
601 | spin_lock_irqsave(&p->lock, flags); |
602 | ||
603 | /* Disable packet I/O. */ | |
368bec0d | 604 | agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); |
d6aa60a1 DD |
605 | prev_packet_enable = agl_gmx_prtx.s.en; |
606 | agl_gmx_prtx.s.en = 0; | |
368bec0d | 607 | cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); |
d6aa60a1 | 608 | |
d6aa60a1 DD |
609 | adr_ctl.u64 = 0; |
610 | adr_ctl.s.cam_mode = cam_mode; | |
611 | adr_ctl.s.mcst = multicast_mode; | |
612 | adr_ctl.s.bcst = 1; /* Allow broadcast */ | |
613 | ||
368bec0d | 614 | cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); |
d6aa60a1 | 615 | |
368bec0d DD |
616 | cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); |
617 | cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); | |
618 | cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); | |
619 | cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); | |
620 | cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); | |
621 | cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); | |
622 | cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); | |
d6aa60a1 DD |
623 | |
624 | /* Restore packet I/O. */ | |
625 | agl_gmx_prtx.s.en = prev_packet_enable; | |
368bec0d | 626 | cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); |
d6aa60a1 DD |
627 | |
628 | spin_unlock_irqrestore(&p->lock, flags); | |
629 | } | |
630 | ||
631 | static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) | |
632 | { | |
f321238b | 633 | int r = eth_mac_addr(netdev, addr); |
d6aa60a1 | 634 | |
f321238b DD |
635 | if (r) |
636 | return r; | |
d6aa60a1 DD |
637 | |
638 | octeon_mgmt_set_rx_filtering(netdev); | |
639 | ||
640 | return 0; | |
641 | } | |
642 | ||
643 | static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) | |
644 | { | |
645 | struct octeon_mgmt *p = netdev_priv(netdev); | |
d6aa60a1 DD |
646 | int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; |
647 | ||
a0ce9b1e | 648 | /* Limit the MTU to make sure the ethernet packets are between |
d6aa60a1 DD |
649 | * 64 bytes and 16383 bytes. |
650 | */ | |
651 | if (size_without_fcs < 64 || size_without_fcs > 16383) { | |
652 | dev_warn(p->dev, "MTU must be between %d and %d.\n", | |
653 | 64 - OCTEON_MGMT_RX_HEADROOM, | |
654 | 16383 - OCTEON_MGMT_RX_HEADROOM); | |
655 | return -EINVAL; | |
656 | } | |
657 | ||
658 | netdev->mtu = new_mtu; | |
659 | ||
368bec0d DD |
660 | cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); |
661 | cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, | |
d6aa60a1 DD |
662 | (size_without_fcs + 7) & 0xfff8); |
663 | ||
664 | return 0; | |
665 | } | |
666 | ||
667 | static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) | |
668 | { | |
669 | struct net_device *netdev = dev_id; | |
670 | struct octeon_mgmt *p = netdev_priv(netdev); | |
d6aa60a1 DD |
671 | union cvmx_mixx_isr mixx_isr; |
672 | ||
368bec0d | 673 | mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); |
d6aa60a1 DD |
674 | |
675 | /* Clear any pending interrupts */ | |
368bec0d DD |
676 | cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); |
677 | cvmx_read_csr(p->mix + MIX_ISR); | |
d6aa60a1 DD |
678 | |
679 | if (mixx_isr.s.irthresh) { | |
680 | octeon_mgmt_disable_rx_irq(p); | |
681 | napi_schedule(&p->napi); | |
682 | } | |
683 | if (mixx_isr.s.orthresh) { | |
684 | octeon_mgmt_disable_tx_irq(p); | |
685 | tasklet_schedule(&p->tx_clean_tasklet); | |
686 | } | |
687 | ||
688 | return IRQ_HANDLED; | |
689 | } | |
690 | ||
3d305850 CR |
691 | static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, |
692 | struct ifreq *rq, int cmd) | |
d6aa60a1 DD |
693 | { |
694 | struct octeon_mgmt *p = netdev_priv(netdev); | |
3d305850 CR |
695 | struct hwtstamp_config config; |
696 | union cvmx_mio_ptp_clock_cfg ptp; | |
697 | union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; | |
698 | bool have_hw_timestamps = false; | |
699 | ||
700 | if (copy_from_user(&config, rq->ifr_data, sizeof(config))) | |
701 | return -EFAULT; | |
d6aa60a1 | 702 | |
3d305850 | 703 | if (config.flags) /* reserved for future extensions */ |
d6aa60a1 DD |
704 | return -EINVAL; |
705 | ||
3d305850 CR |
706 | /* Check the status of hardware for tiemstamps */ |
707 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | |
708 | /* Get the current state of the PTP clock */ | |
709 | ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); | |
710 | if (!ptp.s.ext_clk_en) { | |
711 | /* The clock has not been configured to use an | |
712 | * external source. Program it to use the main clock | |
713 | * reference. | |
714 | */ | |
715 | u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); | |
716 | if (!ptp.s.ptp_en) | |
717 | cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp); | |
718 | pr_info("PTP Clock: Using sclk reference at %lld Hz\n", | |
719 | (NSEC_PER_SEC << 32) / clock_comp); | |
720 | } else { | |
721 | /* The clock is already programmed to use a GPIO */ | |
722 | u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP); | |
723 | pr_info("PTP Clock: Using GPIO %d at %lld Hz\n", | |
724 | ptp.s.ext_clk_in, | |
725 | (NSEC_PER_SEC << 32) / clock_comp); | |
726 | } | |
727 | ||
728 | /* Enable the clock if it wasn't done already */ | |
729 | if (!ptp.s.ptp_en) { | |
730 | ptp.s.ptp_en = 1; | |
731 | cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64); | |
732 | } | |
733 | have_hw_timestamps = true; | |
734 | } | |
735 | ||
736 | if (!have_hw_timestamps) | |
d6aa60a1 DD |
737 | return -EINVAL; |
738 | ||
3d305850 CR |
739 | switch (config.tx_type) { |
740 | case HWTSTAMP_TX_OFF: | |
741 | case HWTSTAMP_TX_ON: | |
742 | break; | |
743 | default: | |
744 | return -ERANGE; | |
745 | } | |
746 | ||
747 | switch (config.rx_filter) { | |
748 | case HWTSTAMP_FILTER_NONE: | |
749 | p->has_rx_tstamp = false; | |
750 | rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); | |
751 | rxx_frm_ctl.s.ptp_mode = 0; | |
752 | cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); | |
753 | break; | |
754 | case HWTSTAMP_FILTER_ALL: | |
755 | case HWTSTAMP_FILTER_SOME: | |
756 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | |
757 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | |
758 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | |
759 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | |
760 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
761 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
762 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | |
763 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | |
764 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | |
765 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | |
766 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | |
767 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | |
768 | p->has_rx_tstamp = have_hw_timestamps; | |
769 | config.rx_filter = HWTSTAMP_FILTER_ALL; | |
770 | if (p->has_rx_tstamp) { | |
771 | rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); | |
772 | rxx_frm_ctl.s.ptp_mode = 1; | |
773 | cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); | |
774 | } | |
775 | break; | |
776 | default: | |
777 | return -ERANGE; | |
778 | } | |
779 | ||
780 | if (copy_to_user(rq->ifr_data, &config, sizeof(config))) | |
781 | return -EFAULT; | |
782 | ||
783 | return 0; | |
784 | } | |
785 | ||
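/* Userspace enables these timestamps with the standard SIOCSHWTSTAMP
 * ioctl, which lands in octeon_mgmt_ioctl_hwtstamp() above. A minimal
 * sketch (the interface name "mgmt0" is an assumption; error handling
 * omitted):
 *
 *        struct hwtstamp_config cfg = {
 *                .tx_type   = HWTSTAMP_TX_ON,
 *                .rx_filter = HWTSTAMP_FILTER_ALL,
 *        };
 *        struct ifreq ifr = {};
 *
 *        strncpy(ifr.ifr_name, "mgmt0", IFNAMSIZ - 1);
 *        ifr.ifr_data = (void *)&cfg;
 *        ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter holds the filter actually applied
 * (HWTSTAMP_FILTER_ALL for any of the PTP filters above).
 */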
d6aa60a1 DD |
786 | static int octeon_mgmt_ioctl(struct net_device *netdev, |
787 | struct ifreq *rq, int cmd) | |
788 | { | |
3d305850 CR |
789 | switch (cmd) { |
790 | case SIOCSHWTSTAMP: | |
791 | return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); | |
792 | default: | |
9e8e6e88 PR |
793 | if (netdev->phydev) |
794 | return phy_mii_ioctl(netdev->phydev, rq, cmd); | |
d6aa60a1 | 795 | return -EINVAL; |
3d305850 | 796 | } |
d6aa60a1 | 797 | } |
d6aa60a1 | 798 | |
eeae05aa DD |
799 | static void octeon_mgmt_disable_link(struct octeon_mgmt *p) |
800 | { | |
801 | union cvmx_agl_gmx_prtx_cfg prtx_cfg; | |
d6aa60a1 | 802 | |
eeae05aa DD |
803 | /* Disable GMX before we make any changes. */ |
804 | prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); | |
805 | prtx_cfg.s.en = 0; | |
806 | prtx_cfg.s.tx_en = 0; | |
807 | prtx_cfg.s.rx_en = 0; | |
808 | cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); | |
809 | ||
810 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | |
811 | int i; | |
812 | for (i = 0; i < 10; i++) { | |
813 | prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); | |
814 | if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1) | |
815 | break; | |
816 | mdelay(1); | |
817 | i++; | |
818 | } | |
819 | } | |
820 | } | |
821 | ||
822 | static void octeon_mgmt_enable_link(struct octeon_mgmt *p) | |
823 | { | |
824 | union cvmx_agl_gmx_prtx_cfg prtx_cfg; | |
825 | ||
826 | /* Restore the GMX enable state only if link is set */ | |
827 | prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); | |
828 | prtx_cfg.s.tx_en = 1; | |
829 | prtx_cfg.s.rx_en = 1; | |
830 | prtx_cfg.s.en = 1; | |
831 | cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); | |
832 | } | |
833 | ||
834 | static void octeon_mgmt_update_link(struct octeon_mgmt *p) | |
835 | { | |
9e8e6e88 PR |
836 | struct net_device *ndev = p->netdev; |
837 | struct phy_device *phydev = ndev->phydev; | |
eeae05aa DD |
838 | union cvmx_agl_gmx_prtx_cfg prtx_cfg; |
839 | ||
840 | prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); | |
841 | ||
9e8e6e88 | 842 | if (!phydev->link) |
eeae05aa DD |
843 | prtx_cfg.s.duplex = 1; |
844 | else | |
9e8e6e88 | 845 | prtx_cfg.s.duplex = phydev->duplex; |
eeae05aa | 846 | |
9e8e6e88 | 847 | switch (phydev->speed) { |
eeae05aa DD |
848 | case 10: |
849 | prtx_cfg.s.speed = 0; | |
850 | prtx_cfg.s.slottime = 0; | |
851 | ||
852 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | |
853 | prtx_cfg.s.burst = 1; | |
854 | prtx_cfg.s.speed_msb = 1; | |
855 | } | |
856 | break; | |
857 | case 100: | |
858 | prtx_cfg.s.speed = 0; | |
859 | prtx_cfg.s.slottime = 0; | |
860 | ||
861 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | |
862 | prtx_cfg.s.burst = 1; | |
863 | prtx_cfg.s.speed_msb = 0; | |
864 | } | |
865 | break; | |
866 | case 1000: | |
867 | /* 1000 MBits is only supported on 6XXX chips */ | |
868 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | |
869 | prtx_cfg.s.speed = 1; | |
870 | prtx_cfg.s.speed_msb = 0; | |
871 | /* Only matters for half-duplex */ | |
872 | prtx_cfg.s.slottime = 1; | |
9e8e6e88 | 873 | prtx_cfg.s.burst = phydev->duplex; |
eeae05aa DD |
874 | } |
875 | break; | |
876 | case 0: /* No link */ | |
877 | default: | |
878 | break; | |
879 | } | |
880 | ||
881 | /* Write the new GMX setting with the port still disabled. */ | |
882 | cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); | |
883 | ||
884 | /* Read GMX CFG again to make sure the config is completed. */ | |
885 | prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); | |
886 | ||
887 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | |
888 | union cvmx_agl_gmx_txx_clk agl_clk; | |
889 | union cvmx_agl_prtx_ctl prtx_ctl; | |
890 | ||
891 | prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); | |
892 | agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); | |
893 | /* MII (both speeds) and RGMII 1000 speed. */ | |
894 | agl_clk.s.clk_cnt = 1; | |
895 | if (prtx_ctl.s.mode == 0) { /* RGMII mode */ | |
9e8e6e88 | 896 | if (phydev->speed == 10) |
eeae05aa | 897 | agl_clk.s.clk_cnt = 50; |
9e8e6e88 | 898 | else if (phydev->speed == 100) |
eeae05aa DD |
899 | agl_clk.s.clk_cnt = 5; |
900 | } | |
901 | cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); | |
902 | } | |
d6aa60a1 DD |
903 | } |
904 | ||
905 | static void octeon_mgmt_adjust_link(struct net_device *netdev) | |
906 | { | |
907 | struct octeon_mgmt *p = netdev_priv(netdev); | |
9e8e6e88 | 908 | struct phy_device *phydev = netdev->phydev; |
d6aa60a1 DD |
909 | unsigned long flags; |
910 | int link_changed = 0; | |
911 | ||
9e8e6e88 | 912 | if (!phydev) |
eeae05aa DD |
913 | return; |
914 | ||
d6aa60a1 | 915 | spin_lock_irqsave(&p->lock, flags); |
eeae05aa DD |
916 | |
917 | ||
9e8e6e88 | 918 | if (!phydev->link && p->last_link) |
eeae05aa DD |
919 | link_changed = -1; |
920 | ||
9e8e6e88 PR |
921 | if (phydev->link && |
922 | (p->last_duplex != phydev->duplex || | |
923 | p->last_link != phydev->link || | |
924 | p->last_speed != phydev->speed)) { | |
eeae05aa DD |
925 | octeon_mgmt_disable_link(p); |
926 | link_changed = 1; | |
927 | octeon_mgmt_update_link(p); | |
928 | octeon_mgmt_enable_link(p); | |
d6aa60a1 | 929 | } |
eeae05aa | 930 | |
9e8e6e88 PR |
931 | p->last_link = phydev->link; |
932 | p->last_speed = phydev->speed; | |
933 | p->last_duplex = phydev->duplex; | |
eeae05aa | 934 | |
d6aa60a1 DD |
935 | spin_unlock_irqrestore(&p->lock, flags); |
936 | ||
937 | if (link_changed != 0) { | |
938 | if (link_changed > 0) { | |
d6aa60a1 | 939 | pr_info("%s: Link is up - %d/%s\n", netdev->name, |
9e8e6e88 PR |
940 | phydev->speed, |
941 | phydev->duplex == DUPLEX_FULL ? | |
d6aa60a1 DD |
942 | "Full" : "Half"); |
943 | } else { | |
d6aa60a1 DD |
944 | pr_info("%s: Link is down\n", netdev->name); |
945 | } | |
946 | } | |
947 | } | |
948 | ||
949 | static int octeon_mgmt_init_phy(struct net_device *netdev) | |
950 | { | |
951 | struct octeon_mgmt *p = netdev_priv(netdev); | |
9e8e6e88 | 952 | struct phy_device *phydev = NULL; |
d6aa60a1 | 953 | |
368bec0d | 954 | if (octeon_is_simulation() || p->phy_np == NULL) { |
d6aa60a1 DD |
955 | /* No PHYs in the simulator. */ |
956 | netif_carrier_on(netdev); | |
957 | return 0; | |
958 | } | |
959 | ||
9e8e6e88 PR |
960 | phydev = of_phy_connect(netdev, p->phy_np, |
961 | octeon_mgmt_adjust_link, 0, | |
962 | PHY_INTERFACE_MODE_MII); | |
d6aa60a1 | 963 | |
9e8e6e88 | 964 | if (!phydev) |
eeae05aa | 965 | return -ENODEV; |
d6aa60a1 DD |
966 | |
967 | return 0; | |
968 | } | |
969 | ||
970 | static int octeon_mgmt_open(struct net_device *netdev) | |
971 | { | |
972 | struct octeon_mgmt *p = netdev_priv(netdev); | |
d6aa60a1 DD |
973 | union cvmx_mixx_ctl mix_ctl; |
974 | union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; | |
975 | union cvmx_mixx_oring1 oring1; | |
976 | union cvmx_mixx_iring1 iring1; | |
d6aa60a1 DD |
977 | union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; |
978 | union cvmx_mixx_irhwm mix_irhwm; | |
979 | union cvmx_mixx_orhwm mix_orhwm; | |
980 | union cvmx_mixx_intena mix_intena; | |
981 | struct sockaddr sa; | |
982 | ||
983 | /* Allocate ring buffers. */ | |
984 | p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | |
985 | GFP_KERNEL); | |
986 | if (!p->tx_ring) | |
987 | return -ENOMEM; | |
988 | p->tx_ring_handle = | |
989 | dma_map_single(p->dev, p->tx_ring, | |
990 | ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | |
991 | DMA_BIDIRECTIONAL); | |
992 | p->tx_next = 0; | |
993 | p->tx_next_clean = 0; | |
994 | p->tx_current_fill = 0; | |
995 | ||
996 | ||
997 | p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), | |
998 | GFP_KERNEL); | |
999 | if (!p->rx_ring) | |
1000 | goto err_nomem; | |
1001 | p->rx_ring_handle = | |
1002 | dma_map_single(p->dev, p->rx_ring, | |
1003 | ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), | |
1004 | DMA_BIDIRECTIONAL); | |
1005 | ||
1006 | p->rx_next = 0; | |
1007 | p->rx_next_fill = 0; | |
1008 | p->rx_current_fill = 0; | |
1009 | ||
1010 | octeon_mgmt_reset_hw(p); | |
1011 | ||
368bec0d | 1012 | mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); |
d6aa60a1 DD |
1013 | |
1014 | /* Bring it out of reset if needed. */ | |
1015 | if (mix_ctl.s.reset) { | |
1016 | mix_ctl.s.reset = 0; | |
368bec0d | 1017 | cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); |
d6aa60a1 | 1018 | do { |
368bec0d | 1019 | mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); |
d6aa60a1 DD |
1020 | } while (mix_ctl.s.reset); |
1021 | } | |
1022 | ||
eeae05aa DD |
1023 | if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { |
1024 | agl_gmx_inf_mode.u64 = 0; | |
1025 | agl_gmx_inf_mode.s.en = 1; | |
1026 | cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); | |
1027 | } | |
1028 | if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) | |
1029 | || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { | |
a0ce9b1e | 1030 | /* Force compensation values, as they are not |
eeae05aa DD |
1031 | * determined properly by HW |
1032 | */ | |
1033 | union cvmx_agl_gmx_drv_ctl drv_ctl; | |
1034 | ||
1035 | drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); | |
1036 | if (p->port) { | |
1037 | drv_ctl.s.byp_en1 = 1; | |
1038 | drv_ctl.s.nctl1 = 6; | |
1039 | drv_ctl.s.pctl1 = 6; | |
1040 | } else { | |
1041 | drv_ctl.s.byp_en = 1; | |
1042 | drv_ctl.s.nctl = 6; | |
1043 | drv_ctl.s.pctl = 6; | |
1044 | } | |
1045 | cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); | |
1046 | } | |
d6aa60a1 DD |
1047 | |
1048 | oring1.u64 = 0; | |
1049 | oring1.s.obase = p->tx_ring_handle >> 3; | |
1050 | oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; | |
368bec0d | 1051 | cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); |
d6aa60a1 DD |
1052 | |
1053 | iring1.u64 = 0; | |
1054 | iring1.s.ibase = p->rx_ring_handle >> 3; | |
1055 | iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; | |
368bec0d | 1056 | cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); |
d6aa60a1 | 1057 | |
d6aa60a1 DD |
1058 | memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); |
1059 | octeon_mgmt_set_mac_address(netdev, &sa); | |
1060 | ||
1061 | octeon_mgmt_change_mtu(netdev, netdev->mtu); | |
1062 | ||
a0ce9b1e | 1063 | /* Enable the port HW. Packets are not allowed until |
d6aa60a1 DD |
1064 | * cvmx_mgmt_port_enable() is called. |
1065 | */ | |
1066 | mix_ctl.u64 = 0; | |
1067 | mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */ | |
1068 | mix_ctl.s.en = 1; /* Enable the port */ | |
1069 | mix_ctl.s.nbtarb = 0; /* Arbitration mode */ | |
1070 | /* MII CB-request FIFO programmable high watermark */ | |
1071 | mix_ctl.s.mrq_hwm = 1; | |
eeae05aa DD |
1072 | #ifdef __LITTLE_ENDIAN |
1073 | mix_ctl.s.lendian = 1; | |
1074 | #endif | |
368bec0d | 1075 | cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); |
d6aa60a1 | 1076 | |
eeae05aa DD |
1077 | /* Read the PHY to find the mode of the interface. */ |
1078 | if (octeon_mgmt_init_phy(netdev)) { | |
1079 | dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); | |
1080 | goto err_noirq; | |
1081 | } | |
d6aa60a1 | 1082 | |
eeae05aa | 1083 | /* Set the mode of the interface, RGMII/MII. */ |
9e8e6e88 | 1084 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) { |
eeae05aa | 1085 | union cvmx_agl_prtx_ctl agl_prtx_ctl; |
9e8e6e88 | 1086 | int rgmii_mode = (netdev->phydev->supported & |
eeae05aa DD |
1087 | (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; |
1088 | ||
1089 | agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); | |
1090 | agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1; | |
1091 | cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); | |
1092 | ||
1093 | /* MII clocks counts are based on the 125Mhz | |
1094 | * reference, which has an 8nS period. So our delays | |
1095 | * need to be multiplied by this factor. | |
1096 | */ | |
1097 | #define NS_PER_PHY_CLK 8 | |
1098 | ||
1099 | /* Take the DLL and clock tree out of reset */ | |
1100 | agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); | |
1101 | agl_prtx_ctl.s.clkrst = 0; | |
1102 | if (rgmii_mode) { | |
1103 | agl_prtx_ctl.s.dllrst = 0; | |
1104 | agl_prtx_ctl.s.clktx_byp = 0; | |
d6aa60a1 | 1105 | } |
eeae05aa DD |
1106 | cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); |
1107 | cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ | |
1108 | ||
1109 | /* Wait for the DLL to lock. External 125 MHz | |
1110 | * reference clock must be stable at this point. | |
1111 | */ | |
1112 | ndelay(256 * NS_PER_PHY_CLK); | |
1113 | ||
1114 | /* Enable the interface */ | |
1115 | agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); | |
1116 | agl_prtx_ctl.s.enable = 1; | |
1117 | cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); | |
1118 | ||
1119 | /* Read the value back to force the previous write */ | |
1120 | agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); | |
1121 | ||
1122 | /* Enable the compensation controller */ | |
1123 | agl_prtx_ctl.s.comp = 1; | |
1124 | agl_prtx_ctl.s.drv_byp = 0; | |
1125 | cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); | |
1126 | /* Force write out before wait. */ | |
1127 | cvmx_read_csr(p->agl_prt_ctl); | |
1128 | ||
1129 | /* Wait for the compensation state to lock. */
1130 | ndelay(1040 * NS_PER_PHY_CLK); | |
1131 | ||
1132 | /* Default Interframe Gaps are too small. The recommended
1133 |  * workaround is to program:
1134 |  *
1135 |  * AGL_GMX_TX_IFG[IFG1]=14
1136 |  * AGL_GMX_TX_IFG[IFG2]=10
1137 |  */
1138 | cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
d6aa60a1 DD |
1139 | } |
1140 | ||
1141 | octeon_mgmt_rx_fill_ring(netdev); | |
1142 | ||
1143 | /* Clear statistics. */ | |
1144 | /* Clear on read. */ | |
368bec0d DD |
1145 | cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); |
1146 | cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); | |
1147 | cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); | |
d6aa60a1 | 1148 | |
368bec0d DD |
1149 | cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); |
1150 | cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); | |
1151 | cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); | |
d6aa60a1 DD |
1152 | |
1153 | /* Clear any pending interrupts */ | |
368bec0d | 1154 | cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); |
d6aa60a1 DD |
1155 | |
1156 | if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, | |
1157 | netdev)) { | |
1158 | dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); | |
1159 | goto err_noirq; | |
1160 | } | |
1161 | ||
1162 | /* Interrupt every single RX packet */ | |
1163 | mix_irhwm.u64 = 0; | |
1164 | mix_irhwm.s.irhwm = 0; | |
368bec0d | 1165 | cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); |
d6aa60a1 | 1166 | |
b635e069 | 1167 | /* Interrupt when we have 1 or more packets to clean. */ |
d6aa60a1 | 1168 | mix_orhwm.u64 = 0; |
eeae05aa | 1169 | mix_orhwm.s.orhwm = 0; |
368bec0d | 1170 | cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); |
d6aa60a1 DD |
1171 | |
1172 | /* Enable receive and transmit interrupts */ | |
1173 | mix_intena.u64 = 0; | |
1174 | mix_intena.s.ithena = 1; | |
1175 | mix_intena.s.othena = 1; | |
368bec0d | 1176 | cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); |
d6aa60a1 | 1177 | |
d6aa60a1 DD |
1178 | /* Enable packet I/O. */ |
1179 | ||
1180 | rxx_frm_ctl.u64 = 0; | |
3d305850 | 1181 | rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; |
d6aa60a1 | 1182 | rxx_frm_ctl.s.pre_align = 1; |
a0ce9b1e | 1183 | /* When set, disables the length check for non-min sized pkts |
d6aa60a1 DD |
1184 | * with padding in the client data. |
1185 | */ | |
1186 | rxx_frm_ctl.s.pad_len = 1; | |
1187 | /* When set, disables the length check for VLAN pkts */ | |
1188 | rxx_frm_ctl.s.vlan_len = 1; | |
1189 | /* When set, PREAMBLE checking is less strict */ | |
1190 | rxx_frm_ctl.s.pre_free = 1; | |
1191 | /* Control Pause Frames can match station SMAC */ | |
1192 | rxx_frm_ctl.s.ctl_smac = 0; | |
1193 | /* Control Pause Frames can match globally assign Multicast address */ | |
1194 | rxx_frm_ctl.s.ctl_mcst = 1; | |
1195 | /* Forward pause information to TX block */ | |
1196 | rxx_frm_ctl.s.ctl_bck = 1; | |
1197 | /* Drop Control Pause Frames */ | |
1198 | rxx_frm_ctl.s.ctl_drp = 1; | |
1199 | /* Strip off the preamble */ | |
1200 | rxx_frm_ctl.s.pre_strp = 1; | |
a0ce9b1e | 1201 | /* This port is configured to send PREAMBLE+SFD to begin every |
d6aa60a1 DD |
1202 | * frame. GMX checks that the PREAMBLE is sent correctly. |
1203 | */ | |
1204 | rxx_frm_ctl.s.pre_chk = 1; | |
368bec0d | 1205 | cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); |
d6aa60a1 | 1206 | |
eeae05aa DD |
1207 | /* Configure the port duplex, speed and enables */ |
1208 | octeon_mgmt_disable_link(p); | |
9e8e6e88 | 1209 | if (netdev->phydev) |
eeae05aa DD |
1210 | octeon_mgmt_update_link(p); |
1211 | octeon_mgmt_enable_link(p); | |
d6aa60a1 DD |
1212 | |
1213 | p->last_link = 0; | |
eeae05aa DD |
1214 | p->last_speed = 0; |
1215 | /* PHY is not present in simulator. The carrier is enabled | |
1216 | * while initializing the phy for simulator, leave it enabled. | |
1217 | */ | |
9e8e6e88 | 1218 | if (netdev->phydev) { |
eeae05aa | 1219 | netif_carrier_off(netdev); |
9e8e6e88 | 1220 | phy_start_aneg(netdev->phydev); |
d6aa60a1 DD |
1221 | } |
1222 | ||
1223 | netif_wake_queue(netdev); | |
1224 | napi_enable(&p->napi); | |
1225 | ||
1226 | return 0; | |
1227 | err_noirq: | |
1228 | octeon_mgmt_reset_hw(p); | |
1229 | dma_unmap_single(p->dev, p->rx_ring_handle, | |
1230 | ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), | |
1231 | DMA_BIDIRECTIONAL); | |
1232 | kfree(p->rx_ring); | |
1233 | err_nomem: | |
1234 | dma_unmap_single(p->dev, p->tx_ring_handle, | |
1235 | ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | |
1236 | DMA_BIDIRECTIONAL); | |
1237 | kfree(p->tx_ring); | |
1238 | return -ENOMEM; | |
1239 | } | |
1240 | ||
1241 | static int octeon_mgmt_stop(struct net_device *netdev) | |
1242 | { | |
1243 | struct octeon_mgmt *p = netdev_priv(netdev); | |
1244 | ||
1245 | napi_disable(&p->napi); | |
1246 | netif_stop_queue(netdev); | |
1247 | ||
9e8e6e88 PR |
1248 | if (netdev->phydev) |
1249 | phy_disconnect(netdev->phydev); | |
d6aa60a1 DD |
1250 | |
1251 | netif_carrier_off(netdev); | |
1252 | ||
1253 | octeon_mgmt_reset_hw(p); | |
1254 | ||
d6aa60a1 DD |
1255 | free_irq(p->irq, netdev); |
1256 | ||
1257 | /* dma_unmap is a nop on Octeon, so just free everything. */ | |
1258 | skb_queue_purge(&p->tx_list); | |
1259 | skb_queue_purge(&p->rx_list); | |
1260 | ||
1261 | dma_unmap_single(p->dev, p->rx_ring_handle, | |
1262 | ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), | |
1263 | DMA_BIDIRECTIONAL); | |
1264 | kfree(p->rx_ring); | |
1265 | ||
1266 | dma_unmap_single(p->dev, p->tx_ring_handle, | |
1267 | ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | |
1268 | DMA_BIDIRECTIONAL); | |
1269 | kfree(p->tx_ring); | |
1270 | ||
d6aa60a1 DD |
1271 | return 0; |
1272 | } | |
1273 | ||
1274 | static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) | |
1275 | { | |
1276 | struct octeon_mgmt *p = netdev_priv(netdev); | |
d6aa60a1 DD |
1277 | union mgmt_port_ring_entry re; |
1278 | unsigned long flags; | |
4e4a4f14 | 1279 | int rv = NETDEV_TX_BUSY; |
d6aa60a1 DD |
1280 | |
1281 | re.d64 = 0; | |
3d305850 | 1282 | re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); |
d6aa60a1 DD |
1283 | re.s.len = skb->len; |
1284 | re.s.addr = dma_map_single(p->dev, skb->data, | |
1285 | skb->len, | |
1286 | DMA_TO_DEVICE); | |
1287 | ||
1288 | spin_lock_irqsave(&p->tx_list.lock, flags); | |
1289 | ||
4e4a4f14 DD |
1290 | if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) { |
1291 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | |
1292 | netif_stop_queue(netdev); | |
1293 | spin_lock_irqsave(&p->tx_list.lock, flags); | |
1294 | } | |
1295 | ||
d6aa60a1 DD |
1296 | if (unlikely(p->tx_current_fill >= |
1297 | ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { | |
1298 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | |
d6aa60a1 DD |
1299 | dma_unmap_single(p->dev, re.s.addr, re.s.len, |
1300 | DMA_TO_DEVICE); | |
4e4a4f14 | 1301 | goto out; |
d6aa60a1 DD |
1302 | } |
1303 | ||
1304 | __skb_queue_tail(&p->tx_list, skb); | |
1305 | ||
1306 | /* Put it in the ring. */ | |
1307 | p->tx_ring[p->tx_next] = re.d64; | |
1308 | p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; | |
1309 | p->tx_current_fill++; | |
1310 | ||
1311 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | |
1312 | ||
1313 | dma_sync_single_for_device(p->dev, p->tx_ring_handle, | |
1314 | ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | |
1315 | DMA_BIDIRECTIONAL); | |
1316 | ||
1317 | netdev->stats.tx_packets++; | |
1318 | netdev->stats.tx_bytes += skb->len; | |
1319 | ||
1320 | /* Ring the bell. */ | |
368bec0d | 1321 | cvmx_write_csr(p->mix + MIX_ORING2, 1); |
d6aa60a1 | 1322 | |
860e9538 | 1323 | netif_trans_update(netdev); |
4e4a4f14 DD |
1324 | rv = NETDEV_TX_OK; |
1325 | out: | |
d6aa60a1 | 1326 | octeon_mgmt_update_tx_stats(netdev); |
4e4a4f14 | 1327 | return rv; |
d6aa60a1 DD |
1328 | } |
1329 | ||
1330 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1331 | static void octeon_mgmt_poll_controller(struct net_device *netdev) | |
1332 | { | |
1333 | struct octeon_mgmt *p = netdev_priv(netdev); | |
1334 | ||
1335 | octeon_mgmt_receive_packets(p, 16); | |
1336 | octeon_mgmt_update_rx_stats(netdev); | |
d6aa60a1 DD |
1337 | } |
1338 | #endif | |
1339 | ||
1340 | static void octeon_mgmt_get_drvinfo(struct net_device *netdev, | |
1341 | struct ethtool_drvinfo *info) | |
1342 | { | |
7826d43f JP |
1343 | strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); |
1344 | strlcpy(info->version, DRV_VERSION, sizeof(info->version)); | |
1345 | strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); | |
1346 | strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); | |
d6aa60a1 DD |
1347 | } |
1348 | ||
f21105df DD |
1349 | static int octeon_mgmt_nway_reset(struct net_device *dev) |
1350 | { | |
f21105df DD |
1351 | if (!capable(CAP_NET_ADMIN)) |
1352 | return -EPERM; | |
1353 | ||
9e8e6e88 PR |
1354 | if (dev->phydev) |
1355 | return phy_start_aneg(dev->phydev); | |
f21105df DD |
1356 | |
1357 | return -EOPNOTSUPP; | |
d6aa60a1 DD |
1358 | } |
1359 | ||
1360 | static const struct ethtool_ops octeon_mgmt_ethtool_ops = { | |
1361 | .get_drvinfo = octeon_mgmt_get_drvinfo, | |
f21105df DD |
1362 | .nway_reset = octeon_mgmt_nway_reset, |
1363 | .get_link = ethtool_op_get_link, | |
f4400ded PR |
1364 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
1365 | .set_link_ksettings = phy_ethtool_set_link_ksettings, | |
d6aa60a1 DD |
1366 | }; |
1367 | ||
1368 | static const struct net_device_ops octeon_mgmt_ops = { | |
1369 | .ndo_open = octeon_mgmt_open, | |
1370 | .ndo_stop = octeon_mgmt_stop, | |
1371 | .ndo_start_xmit = octeon_mgmt_xmit, | |
eeae05aa | 1372 | .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, |
d6aa60a1 | 1373 | .ndo_set_mac_address = octeon_mgmt_set_mac_address, |
eeae05aa | 1374 | .ndo_do_ioctl = octeon_mgmt_ioctl, |
d6aa60a1 DD |
1375 | .ndo_change_mtu = octeon_mgmt_change_mtu, |
1376 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1377 | .ndo_poll_controller = octeon_mgmt_poll_controller, | |
1378 | #endif | |
1379 | }; | |
1380 | ||
5bc7ec70 | 1381 | static int octeon_mgmt_probe(struct platform_device *pdev) |
d6aa60a1 | 1382 | { |
d6aa60a1 DD |
1383 | struct net_device *netdev; |
1384 | struct octeon_mgmt *p; | |
368bec0d DD |
1385 | const __be32 *data; |
1386 | const u8 *mac; | |
1387 | struct resource *res_mix; | |
1388 | struct resource *res_agl; | |
eeae05aa | 1389 | struct resource *res_agl_prt_ctl; |
368bec0d DD |
1390 | int len; |
1391 | int result; | |
d6aa60a1 DD |
1392 | |
1393 | netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); | |
1394 | if (netdev == NULL) | |
1395 | return -ENOMEM; | |
1396 | ||
052958e3 DD |
1397 | SET_NETDEV_DEV(netdev, &pdev->dev); |
1398 | ||
8513fbd8 | 1399 | platform_set_drvdata(pdev, netdev); |
d6aa60a1 DD |
1400 | p = netdev_priv(netdev); |
1401 | netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, | |
1402 | OCTEON_MGMT_NAPI_WEIGHT); | |
1403 | ||
1404 | p->netdev = netdev; | |
1405 | p->dev = &pdev->dev; | |
3d305850 | 1406 | p->has_rx_tstamp = false; |
d6aa60a1 | 1407 | |
368bec0d DD |
1408 | data = of_get_property(pdev->dev.of_node, "cell-index", &len); |
1409 | if (data && len == sizeof(*data)) { | |
1410 | p->port = be32_to_cpup(data); | |
1411 | } else { | |
1412 | dev_err(&pdev->dev, "no 'cell-index' property\n"); | |
1413 | result = -ENXIO; | |
1414 | goto err; | |
1415 | } | |
1416 | ||
d6aa60a1 DD |
1417 | snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); |
1418 | ||
368bec0d DD |
1419 | result = platform_get_irq(pdev, 0); |
1420 | if (result < 0) | |
1421 | goto err; | |
1422 | ||
1423 | p->irq = result; | |
1424 | ||
1425 | res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1426 | if (res_mix == NULL) { | |
1427 | dev_err(&pdev->dev, "no 'reg' resource\n"); | |
1428 | result = -ENXIO; | |
1429 | goto err; | |
1430 | } | |
1431 | ||
1432 | res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1); | |
1433 | if (res_agl == NULL) { | |
1434 | dev_err(&pdev->dev, "no 'reg' resource\n"); | |
1435 | result = -ENXIO; | |
1436 | goto err; | |
1437 | } | |
1438 | ||
eeae05aa DD |
1439 | res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3); |
1440 | if (res_agl_prt_ctl == NULL) { | |
1441 | dev_err(&pdev->dev, "no 'reg' resource\n"); | |
1442 | result = -ENXIO; | |
1443 | goto err; | |
1444 | } | |
1445 | ||
368bec0d DD |
1446 | p->mix_phys = res_mix->start; |
1447 | p->mix_size = resource_size(res_mix); | |
1448 | p->agl_phys = res_agl->start; | |
1449 | p->agl_size = resource_size(res_agl); | |
eeae05aa DD |
1450 | p->agl_prt_ctl_phys = res_agl_prt_ctl->start; |
1451 | p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); | |
368bec0d DD |
1452 | |
1453 | ||
1454 | if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, | |
1455 | res_mix->name)) { | |
1456 | dev_err(&pdev->dev, "request_mem_region (%s) failed\n", | |
1457 | res_mix->name); | |
1458 | result = -ENXIO; | |
1459 | goto err; | |
1460 | } | |
1461 | ||
1462 | if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, | |
1463 | res_agl->name)) { | |
1464 | result = -ENXIO; | |
1465 | dev_err(&pdev->dev, "request_mem_region (%s) failed\n", | |
1466 | res_agl->name); | |
d6aa60a1 | 1467 | goto err; |
368bec0d DD |
1468 | } |
1469 | ||
eeae05aa DD |
1470 | if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, |
1471 | p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { | |
1472 | result = -ENXIO; | |
1473 | dev_err(&pdev->dev, "request_mem_region (%s) failed\n", | |
1474 | res_agl_prt_ctl->name); | |
1475 | goto err; | |
1476 | } | |
368bec0d DD |
1477 | |
1478 | p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); | |
1479 | p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); | |
eeae05aa DD |
1480 | p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, |
1481 | p->agl_prt_ctl_size); | |
d6aa60a1 DD |
1482 | spin_lock_init(&p->lock); |
1483 | ||
1484 | skb_queue_head_init(&p->tx_list); | |
1485 | skb_queue_head_init(&p->rx_list); | |
1486 | tasklet_init(&p->tx_clean_tasklet, | |
1487 | octeon_mgmt_clean_tx_tasklet, (unsigned long)p); | |
1488 | ||
01789349 JP |
1489 | netdev->priv_flags |= IFF_UNICAST_FLT; |
1490 | ||
d6aa60a1 DD |
1491 | netdev->netdev_ops = &octeon_mgmt_ops; |
1492 | netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; | |
1493 | ||
368bec0d DD |
1494 | mac = of_get_mac_address(pdev->dev.of_node); |
1495 | ||
09ec0d05 | 1496 | if (mac) |
f321238b | 1497 | memcpy(netdev->dev_addr, mac, ETH_ALEN); |
15c6ff3b | 1498 | else |
f321238b | 1499 | eth_hw_addr_random(netdev); |
d6aa60a1 | 1500 | |
368bec0d | 1501 | p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); |
d6aa60a1 | 1502 | |
26741a69 RK |
1503 | result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
1504 | if (result) | |
1505 | goto err; | |
368bec0d | 1506 | |
eeae05aa | 1507 | netif_carrier_off(netdev); |
368bec0d DD |
1508 | result = register_netdev(netdev); |
1509 | if (result) | |
d6aa60a1 DD |
1510 | goto err; |
1511 | ||
1512 | dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); | |
1513 | return 0; | |
368bec0d | 1514 | |
d6aa60a1 | 1515 | err: |
46997066 | 1516 | of_node_put(p->phy_np); |
d6aa60a1 | 1517 | free_netdev(netdev); |
368bec0d | 1518 | return result; |
d6aa60a1 DD |
1519 | } |
1520 | ||
5bc7ec70 | 1521 | static int octeon_mgmt_remove(struct platform_device *pdev) |
d6aa60a1 | 1522 | { |
8513fbd8 | 1523 | struct net_device *netdev = platform_get_drvdata(pdev); |
46997066 | 1524 | struct octeon_mgmt *p = netdev_priv(netdev); |
d6aa60a1 DD |
1525 | |
1526 | unregister_netdev(netdev); | |
46997066 | 1527 | of_node_put(p->phy_np); |
d6aa60a1 DD |
1528 | free_netdev(netdev); |
1529 | return 0; | |
1530 | } | |
1531 | ||
static const struct of_device_id octeon_mgmt_match[] = {
        {
                .compatible = "cavium,octeon-5750-mix",
        },
        {},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

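/* Illustrative device tree node for this driver. This is a sketch: the
 * node name, reg ranges and interrupt specifiers are placeholders, not
 * values taken from a real .dtsi.
 *
 *        ethernet@... {
 *                compatible = "cavium,octeon-5750-mix";
 *                reg = <...>;
 *                cell-index = <0>;
 *                interrupts = <...>;
 *                phy-handle = <&phy0>;
 *        };
 *
 * octeon_mgmt_probe() above expects IORESOURCE_MEM entries at indexes
 * 0 (MIX), 1 (AGL) and 3 (AGL PRT_CTL), one IRQ, and the "cell-index"
 * property; "phy-handle" and a MAC address are optional (a random MAC
 * is generated when none is present).
 */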
static struct platform_driver octeon_mgmt_driver = {
        .driver = {
                .name = "octeon_mgmt",
                .of_match_table = octeon_mgmt_match,
        },
        .probe = octeon_mgmt_probe,
        .remove = octeon_mgmt_remove,
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
        /* Force our mdiobus driver module to be loaded first. */
        octeon_mdiobus_force_mod_depencency();
        return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
        platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);