/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
        "\tNumber of packet buffers to allocate and store in the\n"
        "\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
        "\twill be configured to send incoming packets to this POW\n"
        "\tgroup. Also any other software can submit packets to this\n"
        "\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
        "\tPOW group to send packets to other software on. This\n"
        "\tcontrols the creation of the virtual device pow0.\n"
        "\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
        "\tWhen set, always send to the pow group. This will cause\n"
        "\tpackets sent to real ethernet devices to be sent to the\n"
        "\tPOW group instead of the hardware. Unless some other\n"
        "\tapplication changes the config, packets will still be\n"
        "\treceived from the low level hardware. Use this option\n"
        "\tto allow a CVMX app to intercept all packets from the\n"
        "\tlinux kernel. You must specify pow_send_group along with\n"
        "\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
        "\tComma separated list of ethernet devices that should use the\n"
        "\tPOW for transmit instead of the actual ethernet hardware. This\n"
        "\tis a per port version of always_use_pow. always_use_pow takes\n"
        "\tprecedence over this list. For example, setting this to\n"
        "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
        "\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI weight parameter.");
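
/*
 * The parameters above are read at module load time, e.g. (an
 * illustrative invocation; the module name depends on the build):
 *
 *   modprobe octeon-ethernet num_packet_buffers=2048 pow_receive_group=14
 *
 * Those with permission 0444 are read-only afterwards; pow_send_group
 * (0644) also appears writable under /sys/module at runtime, though the
 * driver only consults some of these values at probe time.
 */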

/*
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
        /*
         * FPA 0 may have been drained, try to refill it if we need
         * more than num_packet_buffers / 2, otherwise normal receive
         * processing will refill it. If it were drained, no packets
         * could be received so cvm_oct_napi_poll would never be
         * invoked to do the refill.
         */
        cvm_oct_rx_refill_pool(num_packet_buffers / 2);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                queue_delayed_work(cvm_oct_poll_queue,
                                   &cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
        struct octeon_ethernet *priv = container_of(work,
                                                    struct octeon_ethernet,
                                                    port_periodic_work.work);

        if (priv->poll)
                priv->poll(cvm_oct_device[priv->port]);

        cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
                                                cvm_oct_device[priv->port]);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                queue_delayed_work(cvm_oct_poll_queue,
                                   &priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
        /* Setup the FPA */
        cvmx_fpa_enable();
        cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                             num_packet_buffers);
        cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                             num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
        {
                union cvmx_ipd_ctl_status ipd_ctl_status;

                ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
                ipd_ctl_status.s.pkt_lend = 1;
                ipd_ctl_status.s.wqe_lend = 1;
                cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
        }
#endif
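
        /*
         * Enable RED on input. The thresholds below follow the usual
         * reading of cvmx_helper_setup_red(): random dropping is
         * expected to begin once fewer than num_packet_buffers / 4
         * buffers remain free in FPA 0, and all input is dropped below
         * num_packet_buffers / 8 (256 and 128 with the default 1024).
         */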
        cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns zero on success, negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
        cvmx_wqe_t *work = work_queue_entry;

        int segments = work->word2.s.bufs;
        union cvmx_buf_ptr segment_ptr = work->packet_ptr;

        while (segments--) {
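                /*
                 * The hardware stores the pointer to the next buffer
                 * in the chain in the 8 bytes immediately preceding
                 * each segment's data, hence the read from
                 * segment_ptr.s.addr - 8.
                 */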
                union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
                        cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

                if (unlikely(!segment_ptr.s.i))
                        cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
                                      segment_ptr.s.pool,
                                      CVMX_FPA_PACKET_POOL_SIZE / 128);
                segment_ptr = next_ptr;
        }
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

        return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Returns a pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
        cvmx_pip_port_status_t rx_status;
        cvmx_pko_port_status_t tx_status;
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
                if (octeon_is_simulation()) {
                        /* The simulator doesn't support statistics */
                        memset(&rx_status, 0, sizeof(rx_status));
                        memset(&tx_status, 0, sizeof(tx_status));
                } else {
                        cvmx_pip_get_port_status(priv->port, 1, &rx_status);
                        cvmx_pko_get_port_status(priv->port, 1, &tx_status);
                }
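
                /*
                 * The second argument to the status reads above is the
                 * SDK's clear-on-read flag: each call returns the
                 * counts accumulated since the previous read, which
                 * are then added into priv->stats below.
                 */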

                priv->stats.rx_packets += rx_status.inb_packets;
                priv->stats.tx_packets += tx_status.packets;
                priv->stats.rx_bytes += rx_status.inb_octets;
                priv->stats.tx_bytes += tx_status.octets;
                priv->stats.multicast += rx_status.multicast_packets;
                priv->stats.rx_crc_errors += rx_status.inb_errors;
                priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

                /*
                 * The drop counter must be incremented atomically
                 * since the RX tasklet also increments it.
                 */
#ifdef CONFIG_64BIT
                atomic64_add(rx_status.dropped_packets,
                             (atomic64_t *)&priv->stats.rx_dropped);
#else
                atomic_add(rx_status.dropped_packets,
                           (atomic_t *)&priv->stats.rx_dropped);
#endif
        }

        return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev: Device to change
 * @new_mtu: The new MTU
 *
 * Returns zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
        int vlan_bytes = 4;
#else
        int vlan_bytes = 0;
#endif
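
        /*
         * The 14 + 4 terms below are the ethernet header and FCS;
         * vlan_bytes adds the optional 802.1Q tag. For example, with
         * new_mtu = 1500 and VLAN support built in, the frame on the
         * wire may be up to 1500 + 14 + 4 + 4 = 1522 bytes.
         */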

        /*
         * Limit the MTU to make sure the ethernet frame size is
         * between 64 bytes and 65392 bytes.
         */
        if (new_mtu + 14 + 4 + vlan_bytes < 64 ||
            new_mtu + 14 + 4 + vlan_bytes > 65392) {
                pr_err("MTU must be between %d and %d.\n",
                       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
                return -EINVAL;
        }
        dev->mtu = new_mtu;

        if (interface < 2 &&
            cvmx_helper_interface_get_mode(interface) !=
            CVMX_HELPER_INTERFACE_MODE_SPI) {
                /* Add ethernet header and FCS, and VLAN if configured. */
                int max_packet = new_mtu + 14 + 4 + vlan_bytes;

                if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
                    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
                        /* Signal errors on packets larger than the MTU */
                        cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
                                       max_packet);
                } else {
                        /*
                         * Set the hardware to truncate packets larger
                         * than the MTU and smaller than 64 bytes.
                         */
                        union cvmx_pip_frm_len_chkx frm_len_chk;

                        frm_len_chk.u64 = 0;
                        frm_len_chk.s.minlen = 64;
                        frm_len_chk.s.maxlen = max_packet;
                        cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
                                       frm_len_chk.u64);
                }
                /*
                 * Set the hardware to truncate packets larger than
                 * the MTU. The jabber register must be set to a
                 * multiple of 8 bytes, so round up.
                 */
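                /* e.g. a max_packet of 1522 is rounded up to 1528 */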
                cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
                               (max_packet + 7) & ~7u);
        }
        return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);

        if (interface < 2 &&
            cvmx_helper_interface_get_mode(interface) !=
            CVMX_HELPER_INTERFACE_MODE_SPI) {
                union cvmx_gmxx_rxx_adr_ctl control;

                control.u64 = 0;
                control.s.bcst = 1;     /* Allow broadcast MAC addresses */

                if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
                    (dev->flags & IFF_PROMISC))
                        /* Force accept multicast packets */
                        control.s.mcst = 2;
                else
                        /* Force reject multicast packets */
                        control.s.mcst = 1;

                if (dev->flags & IFF_PROMISC)
                        /*
                         * Reject matches if promisc. Since CAM is
                         * shut off, should accept everything.
                         */
                        control.s.cam_mode = 0;
                else
                        /* Filter packets based on the CAM */
                        control.s.cam_mode = 1;
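
                /*
                 * The low bit of GMXX_PRTX_CFG is the port enable.
                 * Clear it while the address-filter CSRs are updated,
                 * then restore the previously read value afterwards.
                 */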
                gmx_cfg.u64 =
                        cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
                               control.u64);
                if (dev->flags & IFF_PROMISC)
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 0);
                else
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 1);

                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);

        if (interface < 2 &&
            cvmx_helper_interface_get_mode(interface) !=
            CVMX_HELPER_INTERFACE_MODE_SPI) {
                int i;
                u8 *ptr = dev->dev_addr;
                u64 mac = 0;
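
                /*
                 * Pack the six address octets big-endian into the low
                 * 48 bits of a u64, e.g. 00:01:02:03:04:05 becomes
                 * 0x000102030405, which is the layout written to
                 * GMXX_SMACX below.
                 */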
                for (i = 0; i < 6; i++)
                        mac = (mac << 8) | (u64)ptr[i];

                gmx_cfg.u64 =
                        cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
                               ptr[0]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
                               ptr[1]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
                               ptr[2]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
                               ptr[3]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
                               ptr[4]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
                               ptr[5]);
                cvm_oct_common_set_multicast_list(dev);
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
        return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev: The device in question.
 * @addr: Socket address.
 *
 * Returns zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
        int r = eth_mac_addr(dev, addr);

        if (r)
                return r;
        return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Returns zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        const u8 *mac = NULL;

        if (priv->of_node)
                mac = of_get_mac_address(priv->of_node);

        if (mac)
                ether_addr_copy(dev->dev_addr, mac);
        else
                eth_hw_addr_random(dev);

        /*
         * Force the interface to use the POW send if always_use_pow
         * was specified or it is in the pow send list.
         */
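        /*
         * Note that strstr() is a plain substring match, so a
         * pow_send_list entry such as "eth10" also matches a device
         * named "eth1".
         */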
        if (pow_send_group != -1 &&
            (always_use_pow || strstr(pow_send_list, dev->name)))
                priv->queue = -1;

        if (priv->queue != -1)
                dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
        dev->ethtool_ops = &cvm_oct_ethtool_ops;

        cvm_oct_set_mac_filter(dev);
        dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

        /*
         * Zero out stats for port so we won't mistakenly show
         * counters from the bootloader.
         */
        memset(dev->netdev_ops->ndo_get_stats(dev), 0,
               sizeof(struct net_device_stats));

        if (dev->netdev_ops->ndo_stop)
                dev->netdev_ops->ndo_stop(dev);

        return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->phydev)
                phy_disconnect(priv->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
                        void (*link_poll)(struct net_device *))
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
        cvmx_helper_link_info_t link_info;
        int rv;

        rv = cvm_oct_phy_setup_device(dev);
        if (rv)
                return rv;

        gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
        gmx_cfg.s.en = 1;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

        if (octeon_is_simulation())
                return 0;

        if (priv->phydev) {
                int r = phy_read_status(priv->phydev);

                if (r == 0 && priv->phydev->link == 0)
                        netif_carrier_off(dev);
                cvm_oct_adjust_link(dev);
        } else {
                link_info = cvmx_helper_link_get(priv->port);
                if (!link_info.s.link_up)
                        netif_carrier_off(dev);
                priv->poll = link_poll;
                link_poll(dev);
        }

        return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        cvmx_helper_link_info_t link_info;

        link_info = cvmx_helper_link_get(priv->port);
        if (link_info.u64 == priv->link_info)
                return;

        link_info = cvmx_helper_link_autoconf(priv->port);
        priv->link_info = link_info.u64;

        if (link_info.s.link_up) {
                if (!netif_carrier_ok(dev))
                        netif_carrier_on(dev);
        } else if (netif_carrier_ok(dev)) {
                netif_carrier_off(dev);
        }
        cvm_oct_note_carrier(priv, link_info);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_open = cvm_oct_xaui_open,
        .ndo_stop = cvm_oct_common_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
        .ndo_init = cvm_oct_sgmii_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_open = cvm_oct_sgmii_open,
        .ndo_stop = cvm_oct_common_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
        .ndo_init = cvm_oct_spi_init,
        .ndo_uninit = cvm_oct_spi_uninit,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
        .ndo_init = cvm_oct_rgmii_init,
        .ndo_uninit = cvm_oct_rgmii_uninit,
        .ndo_open = cvm_oct_rgmii_open,
        .ndo_stop = cvm_oct_common_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_start_xmit = cvm_oct_xmit_pow,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
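
/*
 * Find the child of @parent whose "reg" property equals @reg_val.
 * of_get_next_child() releases the reference on the node from the
 * previous iteration, and the caller is responsible for dropping the
 * reference on the returned node with of_node_put().
 */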
static struct device_node *cvm_oct_of_get_child(
        const struct device_node *parent, int reg_val)
{
        struct device_node *node = NULL;
        int size;
        const __be32 *addr;

        for (;;) {
                node = of_get_next_child(parent, node);
                if (!node)
                        break;
                addr = of_get_property(node, "reg", &size);
                if (addr && (be32_to_cpu(*addr) == reg_val))
                        break;
        }
        return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
                                                 int interface, int port)
{
        struct device_node *ni, *np;

        ni = cvm_oct_of_get_child(pip, interface);
        if (!ni)
                return NULL;

        np = cvm_oct_of_get_child(ni, port);
        of_node_put(ni);

        return np;
}

static int cvm_oct_probe(struct platform_device *pdev)
{
        int num_interfaces;
        int interface;
        int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
        int qos;
        struct device_node *pip;

        octeon_mdiobus_force_mod_depencency();

        pip = pdev->dev.of_node;
        if (!pip) {
                pr_err("Error: No 'pip' in /aliases\n");
                return -EINVAL;
        }

        cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
        if (!cvm_oct_poll_queue) {
                pr_err("octeon-ethernet: Cannot create workqueue\n");
                return -ENOMEM;
        }

        cvm_oct_configure_common_hw();

        cvmx_helper_initialize_packet_io_global();

        /* Change the input group for all ports before input is enabled */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;

                for (port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port++) {
                        union cvmx_pip_prt_tagx pip_prt_tagx;

                        pip_prt_tagx.u64 =
                                cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
                        pip_prt_tagx.s.grp = pow_receive_group;
                        cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                                       pip_prt_tagx.u64);
                }
        }

        cvmx_helper_ipd_and_packet_input_enable();

        memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

        /*
         * Initialize the FAU used for counting packet buffers that
         * need to be freed.
         */
        cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

        /* Initialize the FAU used for counting tx SKBs that need to be freed */
        cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

        if (pow_send_group != -1) {
                struct net_device *dev;

                pr_info("\tConfiguring device for POW only access\n");
                dev = alloc_etherdev(sizeof(struct octeon_ethernet));
                if (dev) {
                        /* Initialize the device private structure. */
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        dev->netdev_ops = &cvm_oct_pow_netdev_ops;
                        priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
                        priv->port = CVMX_PIP_NUM_INPUT_PORTS;
                        priv->queue = -1;
                        strcpy(dev->name, "pow%d");
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);

                        if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for POW\n");
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                                pr_info("%s: POW send group %d, receive group %d\n",
                                        dev->name, pow_send_group,
                                        pow_receive_group);
                        }
                } else {
                        pr_err("Failed to allocate ethernet device for POW\n");
                }
        }

        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                cvmx_helper_interface_mode_t imode =
                        cvmx_helper_interface_get_mode(interface);
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;
                int port_index;

                for (port_index = 0,
                     port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port_index++, port++) {
                        struct octeon_ethernet *priv;
                        struct net_device *dev =
                                alloc_etherdev(sizeof(struct octeon_ethernet));

                        if (!dev) {
                                pr_err("Failed to allocate ethernet device for port %d\n",
                                       port);
                                continue;
                        }

                        /* Initialize the device private structure. */
                        priv = netdev_priv(dev);
                        priv->netdev = dev;
                        priv->of_node = cvm_oct_node_for_port(pip, interface,
                                                              port_index);

                        INIT_DELAYED_WORK(&priv->port_periodic_work,
                                          cvm_oct_periodic_worker);
                        priv->imode = imode;
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
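                        /*
                         * Carve a block of 32-bit FAU counters, one per
                         * PKO queue, out of the region below
                         * FAU_NUM_PACKET_BUFFERS_TO_FREE; fau is
                         * stepped down by 4 bytes per queue as each
                         * port is registered further below.
                         */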
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                             qos++)
                                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

                        switch (priv->imode) {
                        /* These types don't support ports to IPD/PKO */
                        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
                        case CVMX_HELPER_INTERFACE_MODE_PCIE:
                        case CVMX_HELPER_INTERFACE_MODE_PICMG:
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_NPI:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "npi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_XAUI:
                                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                                strcpy(dev->name, "xaui%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_LOOP:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "loop%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SPI:
                                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                                strcpy(dev->name, "spi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;
                        }

                        if (!dev->netdev_ops) {
                                free_netdev(dev);
                        } else if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for interface %d, port %d\n",
                                       interface, priv->port);
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[priv->port] = dev;
                                fau -= cvmx_pko_get_num_queues(priv->port) *
                                        sizeof(u32);
                                queue_delayed_work(cvm_oct_poll_queue,
                                                   &priv->port_periodic_work,
                                                   HZ);
                        }
                }
        }

        cvm_oct_tx_initialize();
        cvm_oct_rx_initialize();

        /*
         * 150 us: about 10 1500-byte packets at 1GE.
         */
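        /* e.g. at an 800 MHz core clock this is 150 * 800 = 120000 cycles */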
        cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

        queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

        return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
        int port;

        /* Disable POW interrupt */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
        else
                cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

        cvmx_ipd_disable();

        /* Free the interrupt handler */
        free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

        atomic_inc_return(&cvm_oct_poll_queue_stopping);
        cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

        cvm_oct_rx_shutdown();
        cvm_oct_tx_shutdown();

        cvmx_pko_disable();

        /* Free the ethernet devices */
        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        cancel_delayed_work_sync(&priv->port_periodic_work);

                        cvm_oct_tx_shutdown_dev(dev);
                        unregister_netdev(dev);
                        free_netdev(dev);
                        cvm_oct_device[port] = NULL;
                }
        }

        destroy_workqueue(cvm_oct_poll_queue);

        cvmx_pko_shutdown();

        cvmx_ipd_free_ptr();

        /* Free the HW pools */
        cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                              num_packet_buffers);
        cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                              num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
        return 0;
}

static const struct of_device_id cvm_oct_match[] = {
        {
                .compatible = "cavium,octeon-3860-pip",
        },
        {},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
        .probe = cvm_oct_probe,
        .remove = cvm_oct_remove,
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = cvm_oct_match,
        },
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");