staging: octeon-ethernet: move cvm_oct_xaui_open()
drivers/staging/octeon/ethernet.c
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tLinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI weight (packet budget per RX poll).");

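/*
 * Example usage (illustrative; the module file name depends on the
 * build, "octeon-ethernet.ko" is assumed here):
 *
 *	insmod octeon-ethernet.ko num_packet_buffers=2048 \
 *		pow_send_group=14 pow_send_list="eth2,spi3"
 *
 * pow_send_group is also writable at runtime (0644); the other
 * parameters are read-only (0444) once the module is loaded.
 */
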
/*
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}
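
/*
 * Worked example of the RED setup above, assuming the usual
 * cvmx_helper_setup_red(pass_thresh, drop_thresh) semantics from the
 * Octeon helper API: with the default num_packet_buffers = 1024,
 * Random Early Discard engages between 1024 / 4 = 256 and
 * 1024 / 8 = 128 free FPA buffers, shedding input load before the
 * packet pool is fully exhausted.
 */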

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
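
/*
 * Layout note for the walk above (inferred from the code): each
 * received segment keeps a cvmx_buf_ptr to the next segment in the
 * 8 bytes just below its data address, which is why the next pointer
 * is loaded from segment_ptr.s.addr - 8 before the current buffer is
 * returned to the FPA; segments with the "i" bit set are deliberately
 * not freed.
 */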

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
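
/*
 * Worked example of the bounds above, assuming a VLAN-enabled kernel
 * (vlan_bytes = 4): the permitted MTU range is 64 - 14 - 4 - 4 = 42
 * through 65392 - 14 - 4 - 4 = 65370. For new_mtu = 1500 the on-wire
 * limit becomes max_packet = 1500 + 14 + 4 + 4 = 1522 bytes, and the
 * jabber register is programmed with (1522 + 7) & ~7 = 1528.
 */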

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
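
/*
 * For illustration: the loop above packs the six dev_addr octets
 * big-endian into one u64, so MAC 00:01:02:03:04:05 is written to the
 * GMX SMAC register as 0x000102030405, while the same octets are also
 * loaded one per RXX_ADR_CAM0..CAM5 register for receive-side address
 * filtering.
 */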

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:  The device in question.
 * @addr: Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}
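
/*
 * cvm_oct_xaui_open() serves as the .ndo_open hook for XAUI ports
 * (see cvm_oct_xaui_netdev_ops below): when no PHY is attached,
 * cvm_oct_common_open() installs cvm_oct_link_poll() as priv->poll,
 * and the periodic worker then tracks link state through it.
 */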

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_xaui_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init = cvm_oct_sgmii_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_sgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init = cvm_oct_spi_init,
	.ndo_uninit = cvm_oct_spi_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init = cvm_oct_rgmii_init,
	.ndo_uninit = cvm_oct_rgmii_uninit,
	.ndo_open = cvm_oct_rgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_start_xmit = cvm_oct_xmit_pow,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child(
	const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}
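
/*
 * Hypothetical device tree fragment for the lookups below (node names
 * are illustrative; only the "reg" values and the
 * "cavium,octeon-3860-pip" compatible string come from this driver):
 *
 *	pip {
 *		compatible = "cavium,octeon-3860-pip";
 *		interface@0 {
 *			reg = <0>;
 *			ethernet@0 {
 *				reg = <0>;
 *			};
 *		};
 *	};
 *
 * cvm_oct_node_for_port(pip, 0, 0) would return the ethernet@0 node,
 * which cvm_oct_common_init() later consults for a MAC address.
 */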

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work,
						   HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/* 150 uS: about 10 1500-byte packets at 1GE. */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}
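
/*
 * Worked example for cvm_oct_tx_poll_interval above (the core clock
 * is board-specific; 1 GHz is assumed here): octeon_get_clock_rate()
 * / 1000000 yields core clocks per microsecond, so the interval is
 * 150 * 1000 = 150000 cycles, roughly the wire time of ten 1500-byte
 * frames at 1 Gbit/s.
 */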

static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe = cvm_oct_probe,
	.remove = cvm_oct_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");