/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI poll weight for receive processing.");
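
/*
 * Example usage of the parameters above (values are illustrative only):
 *
 *	modprobe octeon-ethernet num_packet_buffers=2048 pow_receive_group=15
 *
 * or, when the driver is built in, via the kernel command line, e.g.
 * octeon_ethernet.num_packet_buffers=2048.
 */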

/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * cvm_oct_device - array of every ethernet device owned by this driver
 * indexed by the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it.  If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}
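
/*
 * A note on the RED thresholds passed to cvmx_helper_setup_red() above
 * (a sketch of the arithmetic; the pass/drop semantics follow the
 * helper's usual convention rather than anything stated in this file):
 * with the default num_packet_buffers of 1024, Random Early Discard
 * accepts all packets while more than 256 buffers remain free, drops
 * everything once fewer than 128 remain, and drops randomly in between.
 */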

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
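		/*
		 * The IPD hardware stores the pointer to a segment's next
		 * buffer in the 8 bytes immediately before the segment's
		 * data address; that is what the "- 8" below reads to
		 * walk the buffer chain.
		 */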
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
						     128));
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev: Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65392 bytes (the hardware maximum frame size).
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU.  The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
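
/*
 * Worked example of the sizing above: with the standard MTU of 1500,
 * max_packet = 1500 + 14 (ethernet header) + 4 (FCS) + 4 (VLAN tag,
 * when 802.1Q is configured) = 1522 bytes, and the jabber register is
 * rounded up to 1528, the next multiple of 8.
 */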

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

/**
 * cvm_oct_set_mac_filter - program the hardware MAC address filter
 * @dev: The device in question.
 *
 * Returns Zero on success
 */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = dev->dev_addr;
		uint64_t mac = 0;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)ptr[i];

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *), bool poll_now)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		if (poll_now)
			link_poll(dev);
	}

	return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init = cvm_oct_xaui_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_xaui_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init = cvm_oct_sgmii_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_sgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init = cvm_oct_spi_init,
	.ndo_uninit = cvm_oct_spi_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init = cvm_oct_rgmii_init,
	.ndo_uninit = cvm_oct_rgmii_uninit,
	.ndo_open = cvm_oct_rgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_start_xmit = cvm_oct_xmit_pow,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child(
				const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}
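
/*
 * For reference, a sketch of the device tree layout these two helpers
 * walk (node names and values are illustrative, not from a real board):
 *
 *	pip {
 *		compatible = "cavium,octeon-3860-pip";
 *		interface@0 {
 *			reg = <0>;
 *			ethernet@0 {
 *				reg = <0>;
 *				local-mac-address = [00 01 02 03 04 05];
 *			};
 *		};
 *	};
 */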

static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue\n");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -= cvmx_pko_get_num_queues(priv->port) *
					sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work,
						   HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 usec: roughly the time to receive 10 1500-byte packets
	 * at 1 Gbps (each takes about 12 usec on the wire).
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove		= cvm_oct_remove,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");