/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
27 #include <linux/platform_device.h>
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/phy.h>
33 #include <linux/slab.h>
34 #include <linux/interrupt.h>
35 #include <linux/of_net.h>
39 #include <asm/octeon/octeon.h>
41 #include "ethernet-defines.h"
42 #include "octeon-ethernet.h"
43 #include "ethernet-mem.h"
44 #include "ethernet-rx.h"
45 #include "ethernet-tx.h"
46 #include "ethernet-mdio.h"
47 #include "ethernet-util.h"
49 #include <asm/octeon/cvmx-pip.h>
50 #include <asm/octeon/cvmx-pko.h>
51 #include <asm/octeon/cvmx-fau.h>
52 #include <asm/octeon/cvmx-ipd.h>
53 #include <asm/octeon/cvmx-helper.h>
55 #include <asm/octeon/cvmx-gmxx-defs.h>
56 #include <asm/octeon/cvmx-smix-defs.h>
58 static int num_packet_buffers
= 1024;
59 module_param(num_packet_buffers
, int, 0444);
60 MODULE_PARM_DESC(num_packet_buffers
, "\n"
61 "\tNumber of packet buffers to allocate and store in the\n"
62 "\tFPA. By default, 1024 packet buffers are used.\n");
64 int pow_receive_group
= 15;
65 module_param(pow_receive_group
, int, 0444);
66 MODULE_PARM_DESC(pow_receive_group
, "\n"
67 "\tPOW group to receive packets from. All ethernet hardware\n"
68 "\twill be configured to send incoming packets to this POW\n"
69 "\tgroup. Also any other software can submit packets to this\n"
70 "\tgroup for the kernel to process.");
72 int pow_send_group
= -1;
73 module_param(pow_send_group
, int, 0644);
74 MODULE_PARM_DESC(pow_send_group
, "\n"
75 "\tPOW group to send packets to other software on. This\n"
76 "\tcontrols the creation of the virtual device pow0.\n"
77 "\talways_use_pow also depends on this value.");
80 module_param(always_use_pow
, int, 0444);
81 MODULE_PARM_DESC(always_use_pow
, "\n"
82 "\tWhen set, always send to the pow group. This will cause\n"
83 "\tpackets sent to real ethernet devices to be sent to the\n"
84 "\tPOW group instead of the hardware. Unless some other\n"
85 "\tapplication changes the config, packets will still be\n"
86 "\treceived from the low level hardware. Use this option\n"
87 "\tto allow a CVMX app to intercept all packets from the\n"
88 "\tlinux kernel. You must specify pow_send_group along with\n"
91 char pow_send_list
[128] = "";
92 module_param_string(pow_send_list
, pow_send_list
, sizeof(pow_send_list
), 0444);
93 MODULE_PARM_DESC(pow_send_list
, "\n"
94 "\tComma separated list of ethernet devices that should use the\n"
95 "\tPOW for transmit instead of the actual ethernet hardware. This\n"
96 "\tis a per port version of always_use_pow. always_use_pow takes\n"
97 "\tprecedence over this list. For example, setting this to\n"
98 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
99 "\tusing the pow_send_group.");
101 int rx_napi_weight
= 32;
102 module_param(rx_napi_weight
, int, 0444);
103 MODULE_PARM_DESC(rx_napi_weight
, "The NAPI WEIGHT parameter.");
106 * cvm_oct_poll_queue - Workqueue for polling operations.
108 struct workqueue_struct
*cvm_oct_poll_queue
;
111 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
113 * Set to one right before cvm_oct_poll_queue is destroyed.
115 atomic_t cvm_oct_poll_queue_stopping
= ATOMIC_INIT(0);
118 * Array of every ethernet device owned by this driver indexed by
119 * the ipd input port number.
121 struct net_device
*cvm_oct_device
[TOTAL_NUMBER_OF_PORTS
];
123 u64 cvm_oct_tx_poll_interval
;
/* Periodic FPA refill work, re-armed by cvm_oct_rx_refill_worker() itself. */
static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
128 static void cvm_oct_rx_refill_worker(struct work_struct
*work
)
131 * FPA 0 may have been drained, try to refill it if we need
132 * more than num_packet_buffers / 2, otherwise normal receive
133 * processing will refill it. If it were drained, no packets
134 * could be received so cvm_oct_napi_poll would never be
135 * invoked to do the refill.
137 cvm_oct_rx_refill_pool(num_packet_buffers
/ 2);
139 if (!atomic_read(&cvm_oct_poll_queue_stopping
))
140 queue_delayed_work(cvm_oct_poll_queue
,
141 &cvm_oct_rx_refill_work
, HZ
);
144 static void cvm_oct_periodic_worker(struct work_struct
*work
)
146 struct octeon_ethernet
*priv
= container_of(work
,
147 struct octeon_ethernet
,
148 port_periodic_work
.work
);
151 priv
->poll(cvm_oct_device
[priv
->port
]);
153 cvm_oct_device
[priv
->port
]->netdev_ops
->ndo_get_stats(
154 cvm_oct_device
[priv
->port
]);
156 if (!atomic_read(&cvm_oct_poll_queue_stopping
))
157 queue_delayed_work(cvm_oct_poll_queue
,
158 &priv
->port_periodic_work
, HZ
);
161 static void cvm_oct_configure_common_hw(void)
165 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL
, CVMX_FPA_PACKET_POOL_SIZE
,
167 cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL
, CVMX_FPA_WQE_POOL_SIZE
,
169 if (CVMX_FPA_OUTPUT_BUFFER_POOL
!= CVMX_FPA_PACKET_POOL
)
170 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL
,
171 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
, 128);
173 #ifdef __LITTLE_ENDIAN
175 union cvmx_ipd_ctl_status ipd_ctl_status
;
176 ipd_ctl_status
.u64
= cvmx_read_csr(CVMX_IPD_CTL_STATUS
);
177 ipd_ctl_status
.s
.pkt_lend
= 1;
178 ipd_ctl_status
.s
.wqe_lend
= 1;
179 cvmx_write_csr(CVMX_IPD_CTL_STATUS
, ipd_ctl_status
.u64
);
183 cvmx_helper_setup_red(num_packet_buffers
/ 4, num_packet_buffers
/ 8);
187 * cvm_oct_free_work- Free a work queue entry
189 * @work_queue_entry: Work queue entry to free
191 * Returns Zero on success, Negative on failure.
193 int cvm_oct_free_work(void *work_queue_entry
)
195 cvmx_wqe_t
*work
= work_queue_entry
;
197 int segments
= work
->word2
.s
.bufs
;
198 union cvmx_buf_ptr segment_ptr
= work
->packet_ptr
;
201 union cvmx_buf_ptr next_ptr
= *(union cvmx_buf_ptr
*)
202 cvmx_phys_to_ptr(segment_ptr
.s
.addr
- 8);
203 if (unlikely(!segment_ptr
.s
.i
))
204 cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr
),
206 DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE
/
208 segment_ptr
= next_ptr
;
210 cvmx_fpa_free(work
, CVMX_FPA_WQE_POOL
, DONT_WRITEBACK(1));
214 EXPORT_SYMBOL(cvm_oct_free_work
);
217 * cvm_oct_common_get_stats - get the low level ethernet statistics
218 * @dev: Device to get the statistics from
220 * Returns Pointer to the statistics
222 static struct net_device_stats
*cvm_oct_common_get_stats(struct net_device
*dev
)
224 cvmx_pip_port_status_t rx_status
;
225 cvmx_pko_port_status_t tx_status
;
226 struct octeon_ethernet
*priv
= netdev_priv(dev
);
228 if (priv
->port
< CVMX_PIP_NUM_INPUT_PORTS
) {
229 if (octeon_is_simulation()) {
230 /* The simulator doesn't support statistics */
231 memset(&rx_status
, 0, sizeof(rx_status
));
232 memset(&tx_status
, 0, sizeof(tx_status
));
234 cvmx_pip_get_port_status(priv
->port
, 1, &rx_status
);
235 cvmx_pko_get_port_status(priv
->port
, 1, &tx_status
);
238 priv
->stats
.rx_packets
+= rx_status
.inb_packets
;
239 priv
->stats
.tx_packets
+= tx_status
.packets
;
240 priv
->stats
.rx_bytes
+= rx_status
.inb_octets
;
241 priv
->stats
.tx_bytes
+= tx_status
.octets
;
242 priv
->stats
.multicast
+= rx_status
.multicast_packets
;
243 priv
->stats
.rx_crc_errors
+= rx_status
.inb_errors
;
244 priv
->stats
.rx_frame_errors
+= rx_status
.fcs_align_err_packets
;
247 * The drop counter must be incremented atomically
248 * since the RX tasklet also increments it.
251 atomic64_add(rx_status
.dropped_packets
,
252 (atomic64_t
*)&priv
->stats
.rx_dropped
);
254 atomic_add(rx_status
.dropped_packets
,
255 (atomic_t
*)&priv
->stats
.rx_dropped
);
263 * cvm_oct_common_change_mtu - change the link MTU
264 * @dev: Device to change
265 * @new_mtu: The new MTU
267 * Returns Zero on success
269 static int cvm_oct_common_change_mtu(struct net_device
*dev
, int new_mtu
)
271 struct octeon_ethernet
*priv
= netdev_priv(dev
);
272 int interface
= INTERFACE(priv
->port
);
273 int index
= INDEX(priv
->port
);
274 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
281 * Limit the MTU to make sure the ethernet packets are between
282 * 64 bytes and 65535 bytes.
284 if ((new_mtu
+ 14 + 4 + vlan_bytes
< 64)
285 || (new_mtu
+ 14 + 4 + vlan_bytes
> 65392)) {
286 pr_err("MTU must be between %d and %d.\n",
287 64 - 14 - 4 - vlan_bytes
, 65392 - 14 - 4 - vlan_bytes
);
293 && (cvmx_helper_interface_get_mode(interface
) !=
294 CVMX_HELPER_INTERFACE_MODE_SPI
)) {
295 /* Add ethernet header and FCS, and VLAN if configured. */
296 int max_packet
= new_mtu
+ 14 + 4 + vlan_bytes
;
298 if (OCTEON_IS_MODEL(OCTEON_CN3XXX
)
299 || OCTEON_IS_MODEL(OCTEON_CN58XX
)) {
300 /* Signal errors on packets larger than the MTU */
301 cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index
, interface
),
305 * Set the hardware to truncate packets larger
306 * than the MTU and smaller the 64 bytes.
308 union cvmx_pip_frm_len_chkx frm_len_chk
;
311 frm_len_chk
.s
.minlen
= 64;
312 frm_len_chk
.s
.maxlen
= max_packet
;
313 cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface
),
317 * Set the hardware to truncate packets larger than
318 * the MTU. The jabber register must be set to a
319 * multiple of 8 bytes, so round up.
321 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index
, interface
),
322 (max_packet
+ 7) & ~7u);
328 * cvm_oct_common_set_multicast_list - set the multicast list
329 * @dev: Device to work on
331 static void cvm_oct_common_set_multicast_list(struct net_device
*dev
)
333 union cvmx_gmxx_prtx_cfg gmx_cfg
;
334 struct octeon_ethernet
*priv
= netdev_priv(dev
);
335 int interface
= INTERFACE(priv
->port
);
336 int index
= INDEX(priv
->port
);
339 && (cvmx_helper_interface_get_mode(interface
) !=
340 CVMX_HELPER_INTERFACE_MODE_SPI
)) {
341 union cvmx_gmxx_rxx_adr_ctl control
;
344 control
.s
.bcst
= 1; /* Allow broadcast MAC addresses */
346 if (!netdev_mc_empty(dev
) || (dev
->flags
& IFF_ALLMULTI
) ||
347 (dev
->flags
& IFF_PROMISC
))
348 /* Force accept multicast packets */
351 /* Force reject multicast packets */
354 if (dev
->flags
& IFF_PROMISC
)
356 * Reject matches if promisc. Since CAM is
357 * shut off, should accept everything.
359 control
.s
.cam_mode
= 0;
361 /* Filter packets based on the CAM */
362 control
.s
.cam_mode
= 1;
365 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index
, interface
));
366 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
),
367 gmx_cfg
.u64
& ~1ull);
369 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index
, interface
),
371 if (dev
->flags
& IFF_PROMISC
)
372 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
373 (index
, interface
), 0);
375 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
376 (index
, interface
), 1);
378 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
),
384 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
385 * @dev: The device in question.
386 * @addr: Address structure to change it too.
388 * Returns Zero on success
390 static int cvm_oct_set_mac_filter(struct net_device
*dev
)
392 struct octeon_ethernet
*priv
= netdev_priv(dev
);
393 union cvmx_gmxx_prtx_cfg gmx_cfg
;
394 int interface
= INTERFACE(priv
->port
);
395 int index
= INDEX(priv
->port
);
398 && (cvmx_helper_interface_get_mode(interface
) !=
399 CVMX_HELPER_INTERFACE_MODE_SPI
)) {
401 uint8_t *ptr
= dev
->dev_addr
;
404 for (i
= 0; i
< 6; i
++)
405 mac
= (mac
<< 8) | (uint64_t)ptr
[i
];
408 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index
, interface
));
409 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
),
410 gmx_cfg
.u64
& ~1ull);
412 cvmx_write_csr(CVMX_GMXX_SMACX(index
, interface
), mac
);
413 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index
, interface
),
415 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index
, interface
),
417 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index
, interface
),
419 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index
, interface
),
421 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index
, interface
),
423 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index
, interface
),
425 cvm_oct_common_set_multicast_list(dev
);
426 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
),
/* ndo_set_mac_address: validate/store the address, then program the CAM. */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	/* Propagate eth_mac_addr() failures (e.g. invalid address). */
	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}
442 * cvm_oct_common_init - per network device initialization
443 * @dev: Device to initialize
445 * Returns Zero on success
447 int cvm_oct_common_init(struct net_device
*dev
)
449 struct octeon_ethernet
*priv
= netdev_priv(dev
);
450 const u8
*mac
= NULL
;
453 mac
= of_get_mac_address(priv
->of_node
);
456 ether_addr_copy(dev
->dev_addr
, mac
);
458 eth_hw_addr_random(dev
);
461 * Force the interface to use the POW send if always_use_pow
462 * was specified or it is in the pow send list.
464 if ((pow_send_group
!= -1)
465 && (always_use_pow
|| strstr(pow_send_list
, dev
->name
)))
468 if (priv
->queue
!= -1)
469 dev
->features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
;
471 /* We do our own locking, Linux doesn't need to */
472 dev
->features
|= NETIF_F_LLTX
;
473 dev
->ethtool_ops
= &cvm_oct_ethtool_ops
;
475 cvm_oct_set_mac_filter(dev
);
476 dev
->netdev_ops
->ndo_change_mtu(dev
, dev
->mtu
);
479 * Zero out stats for port so we won't mistakenly show
480 * counters from the bootloader.
482 memset(dev
->netdev_ops
->ndo_get_stats(dev
), 0,
483 sizeof(struct net_device_stats
));
485 if (dev
->netdev_ops
->ndo_stop
)
486 dev
->netdev_ops
->ndo_stop(dev
);
491 void cvm_oct_common_uninit(struct net_device
*dev
)
493 struct octeon_ethernet
*priv
= netdev_priv(dev
);
496 phy_disconnect(priv
->phydev
);
499 int cvm_oct_common_open(struct net_device
*dev
,
500 void (*link_poll
)(struct net_device
*), bool poll_now
)
502 union cvmx_gmxx_prtx_cfg gmx_cfg
;
503 struct octeon_ethernet
*priv
= netdev_priv(dev
);
504 int interface
= INTERFACE(priv
->port
);
505 int index
= INDEX(priv
->port
);
506 cvmx_helper_link_info_t link_info
;
509 rv
= cvm_oct_phy_setup_device(dev
);
513 gmx_cfg
.u64
= cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index
, interface
));
515 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
), gmx_cfg
.u64
);
517 if (octeon_is_simulation())
521 int r
= phy_read_status(priv
->phydev
);
523 if (r
== 0 && priv
->phydev
->link
== 0)
524 netif_carrier_off(dev
);
525 cvm_oct_adjust_link(dev
);
527 link_info
= cvmx_helper_link_get(priv
->port
);
528 if (!link_info
.s
.link_up
)
529 netif_carrier_off(dev
);
530 priv
->poll
= link_poll
;
538 void cvm_oct_link_poll(struct net_device
*dev
)
540 struct octeon_ethernet
*priv
= netdev_priv(dev
);
541 cvmx_helper_link_info_t link_info
;
543 link_info
= cvmx_helper_link_get(priv
->port
);
544 if (link_info
.u64
== priv
->link_info
)
547 link_info
= cvmx_helper_link_autoconf(priv
->port
);
548 priv
->link_info
= link_info
.u64
;
550 if (link_info
.s
.link_up
) {
551 if (!netif_carrier_ok(dev
))
552 netif_carrier_on(dev
);
553 } else if (netif_carrier_ok(dev
)) {
554 netif_carrier_off(dev
);
556 cvm_oct_note_carrier(priv
, link_info
);
559 static const struct net_device_ops cvm_oct_npi_netdev_ops
= {
560 .ndo_init
= cvm_oct_common_init
,
561 .ndo_uninit
= cvm_oct_common_uninit
,
562 .ndo_start_xmit
= cvm_oct_xmit
,
563 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
564 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
565 .ndo_do_ioctl
= cvm_oct_ioctl
,
566 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
567 .ndo_get_stats
= cvm_oct_common_get_stats
,
568 #ifdef CONFIG_NET_POLL_CONTROLLER
569 .ndo_poll_controller
= cvm_oct_poll_controller
,
572 static const struct net_device_ops cvm_oct_xaui_netdev_ops
= {
573 .ndo_init
= cvm_oct_xaui_init
,
574 .ndo_uninit
= cvm_oct_common_uninit
,
575 .ndo_open
= cvm_oct_xaui_open
,
576 .ndo_stop
= cvm_oct_common_stop
,
577 .ndo_start_xmit
= cvm_oct_xmit
,
578 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
579 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
580 .ndo_do_ioctl
= cvm_oct_ioctl
,
581 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
582 .ndo_get_stats
= cvm_oct_common_get_stats
,
583 #ifdef CONFIG_NET_POLL_CONTROLLER
584 .ndo_poll_controller
= cvm_oct_poll_controller
,
587 static const struct net_device_ops cvm_oct_sgmii_netdev_ops
= {
588 .ndo_init
= cvm_oct_sgmii_init
,
589 .ndo_uninit
= cvm_oct_common_uninit
,
590 .ndo_open
= cvm_oct_sgmii_open
,
591 .ndo_stop
= cvm_oct_common_stop
,
592 .ndo_start_xmit
= cvm_oct_xmit
,
593 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
594 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
595 .ndo_do_ioctl
= cvm_oct_ioctl
,
596 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
597 .ndo_get_stats
= cvm_oct_common_get_stats
,
598 #ifdef CONFIG_NET_POLL_CONTROLLER
599 .ndo_poll_controller
= cvm_oct_poll_controller
,
602 static const struct net_device_ops cvm_oct_spi_netdev_ops
= {
603 .ndo_init
= cvm_oct_spi_init
,
604 .ndo_uninit
= cvm_oct_spi_uninit
,
605 .ndo_start_xmit
= cvm_oct_xmit
,
606 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
607 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
608 .ndo_do_ioctl
= cvm_oct_ioctl
,
609 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
610 .ndo_get_stats
= cvm_oct_common_get_stats
,
611 #ifdef CONFIG_NET_POLL_CONTROLLER
612 .ndo_poll_controller
= cvm_oct_poll_controller
,
615 static const struct net_device_ops cvm_oct_rgmii_netdev_ops
= {
616 .ndo_init
= cvm_oct_rgmii_init
,
617 .ndo_uninit
= cvm_oct_rgmii_uninit
,
618 .ndo_open
= cvm_oct_rgmii_open
,
619 .ndo_stop
= cvm_oct_common_stop
,
620 .ndo_start_xmit
= cvm_oct_xmit
,
621 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
622 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
623 .ndo_do_ioctl
= cvm_oct_ioctl
,
624 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
625 .ndo_get_stats
= cvm_oct_common_get_stats
,
626 #ifdef CONFIG_NET_POLL_CONTROLLER
627 .ndo_poll_controller
= cvm_oct_poll_controller
,
630 static const struct net_device_ops cvm_oct_pow_netdev_ops
= {
631 .ndo_init
= cvm_oct_common_init
,
632 .ndo_start_xmit
= cvm_oct_xmit_pow
,
633 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
634 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
635 .ndo_do_ioctl
= cvm_oct_ioctl
,
636 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
637 .ndo_get_stats
= cvm_oct_common_get_stats
,
638 #ifdef CONFIG_NET_POLL_CONTROLLER
639 .ndo_poll_controller
= cvm_oct_poll_controller
,
643 static struct device_node
*cvm_oct_of_get_child(
644 const struct device_node
*parent
, int reg_val
)
646 struct device_node
*node
= NULL
;
651 node
= of_get_next_child(parent
, node
);
654 addr
= of_get_property(node
, "reg", &size
);
655 if (addr
&& (be32_to_cpu(*addr
) == reg_val
))
661 static struct device_node
*cvm_oct_node_for_port(struct device_node
*pip
,
662 int interface
, int port
)
664 struct device_node
*ni
, *np
;
666 ni
= cvm_oct_of_get_child(pip
, interface
);
670 np
= cvm_oct_of_get_child(ni
, port
);
676 static int cvm_oct_probe(struct platform_device
*pdev
)
680 int fau
= FAU_NUM_PACKET_BUFFERS_TO_FREE
;
682 struct device_node
*pip
;
684 octeon_mdiobus_force_mod_depencency();
685 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION
);
687 pip
= pdev
->dev
.of_node
;
689 pr_err("Error: No 'pip' in /aliases\n");
693 cvm_oct_poll_queue
= create_singlethread_workqueue("octeon-ethernet");
694 if (cvm_oct_poll_queue
== NULL
) {
695 pr_err("octeon-ethernet: Cannot create workqueue");
699 cvm_oct_configure_common_hw();
701 cvmx_helper_initialize_packet_io_global();
703 /* Change the input group for all ports before input is enabled */
704 num_interfaces
= cvmx_helper_get_number_of_interfaces();
705 for (interface
= 0; interface
< num_interfaces
; interface
++) {
706 int num_ports
= cvmx_helper_ports_on_interface(interface
);
709 for (port
= cvmx_helper_get_ipd_port(interface
, 0);
710 port
< cvmx_helper_get_ipd_port(interface
, num_ports
);
712 union cvmx_pip_prt_tagx pip_prt_tagx
;
715 cvmx_read_csr(CVMX_PIP_PRT_TAGX(port
));
716 pip_prt_tagx
.s
.grp
= pow_receive_group
;
717 cvmx_write_csr(CVMX_PIP_PRT_TAGX(port
),
722 cvmx_helper_ipd_and_packet_input_enable();
724 memset(cvm_oct_device
, 0, sizeof(cvm_oct_device
));
727 * Initialize the FAU used for counting packet buffers that
730 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE
, 0);
732 /* Initialize the FAU used for counting tx SKBs that need to be freed */
733 cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN
, 0);
735 if ((pow_send_group
!= -1)) {
736 struct net_device
*dev
;
738 pr_info("\tConfiguring device for POW only access\n");
739 dev
= alloc_etherdev(sizeof(struct octeon_ethernet
));
741 /* Initialize the device private structure. */
742 struct octeon_ethernet
*priv
= netdev_priv(dev
);
744 dev
->netdev_ops
= &cvm_oct_pow_netdev_ops
;
745 priv
->imode
= CVMX_HELPER_INTERFACE_MODE_DISABLED
;
746 priv
->port
= CVMX_PIP_NUM_INPUT_PORTS
;
748 strcpy(dev
->name
, "pow%d");
749 for (qos
= 0; qos
< 16; qos
++)
750 skb_queue_head_init(&priv
->tx_free_list
[qos
]);
752 if (register_netdev(dev
) < 0) {
753 pr_err("Failed to register ethernet device for POW\n");
756 cvm_oct_device
[CVMX_PIP_NUM_INPUT_PORTS
] = dev
;
757 pr_info("%s: POW send group %d, receive group %d\n",
758 dev
->name
, pow_send_group
,
762 pr_err("Failed to allocate ethernet device for POW\n");
766 num_interfaces
= cvmx_helper_get_number_of_interfaces();
767 for (interface
= 0; interface
< num_interfaces
; interface
++) {
768 cvmx_helper_interface_mode_t imode
=
769 cvmx_helper_interface_get_mode(interface
);
770 int num_ports
= cvmx_helper_ports_on_interface(interface
);
775 port
= cvmx_helper_get_ipd_port(interface
, 0);
776 port
< cvmx_helper_get_ipd_port(interface
, num_ports
);
777 port_index
++, port
++) {
778 struct octeon_ethernet
*priv
;
779 struct net_device
*dev
=
780 alloc_etherdev(sizeof(struct octeon_ethernet
));
782 pr_err("Failed to allocate ethernet device for port %d\n",
787 /* Initialize the device private structure. */
788 priv
= netdev_priv(dev
);
790 priv
->of_node
= cvm_oct_node_for_port(pip
, interface
,
793 INIT_DELAYED_WORK(&priv
->port_periodic_work
,
794 cvm_oct_periodic_worker
);
797 priv
->queue
= cvmx_pko_get_base_queue(priv
->port
);
798 priv
->fau
= fau
- cvmx_pko_get_num_queues(port
) * 4;
799 for (qos
= 0; qos
< 16; qos
++)
800 skb_queue_head_init(&priv
->tx_free_list
[qos
]);
801 for (qos
= 0; qos
< cvmx_pko_get_num_queues(port
);
803 cvmx_fau_atomic_write32(priv
->fau
+ qos
* 4, 0);
805 switch (priv
->imode
) {
807 /* These types don't support ports to IPD/PKO */
808 case CVMX_HELPER_INTERFACE_MODE_DISABLED
:
809 case CVMX_HELPER_INTERFACE_MODE_PCIE
:
810 case CVMX_HELPER_INTERFACE_MODE_PICMG
:
813 case CVMX_HELPER_INTERFACE_MODE_NPI
:
814 dev
->netdev_ops
= &cvm_oct_npi_netdev_ops
;
815 strcpy(dev
->name
, "npi%d");
818 case CVMX_HELPER_INTERFACE_MODE_XAUI
:
819 dev
->netdev_ops
= &cvm_oct_xaui_netdev_ops
;
820 strcpy(dev
->name
, "xaui%d");
823 case CVMX_HELPER_INTERFACE_MODE_LOOP
:
824 dev
->netdev_ops
= &cvm_oct_npi_netdev_ops
;
825 strcpy(dev
->name
, "loop%d");
828 case CVMX_HELPER_INTERFACE_MODE_SGMII
:
829 dev
->netdev_ops
= &cvm_oct_sgmii_netdev_ops
;
830 strcpy(dev
->name
, "eth%d");
833 case CVMX_HELPER_INTERFACE_MODE_SPI
:
834 dev
->netdev_ops
= &cvm_oct_spi_netdev_ops
;
835 strcpy(dev
->name
, "spi%d");
838 case CVMX_HELPER_INTERFACE_MODE_RGMII
:
839 case CVMX_HELPER_INTERFACE_MODE_GMII
:
840 dev
->netdev_ops
= &cvm_oct_rgmii_netdev_ops
;
841 strcpy(dev
->name
, "eth%d");
845 if (!dev
->netdev_ops
) {
847 } else if (register_netdev(dev
) < 0) {
848 pr_err("Failed to register ethernet device for interface %d, port %d\n",
849 interface
, priv
->port
);
852 cvm_oct_device
[priv
->port
] = dev
;
854 cvmx_pko_get_num_queues(priv
->port
) *
856 queue_delayed_work(cvm_oct_poll_queue
,
857 &priv
->port_periodic_work
, HZ
);
862 cvm_oct_tx_initialize();
863 cvm_oct_rx_initialize();
866 * 150 uS: about 10 1500-byte packets at 1GE.
868 cvm_oct_tx_poll_interval
= 150 * (octeon_get_clock_rate() / 1000000);
870 queue_delayed_work(cvm_oct_poll_queue
, &cvm_oct_rx_refill_work
, HZ
);
875 static int cvm_oct_remove(struct platform_device
*pdev
)
879 /* Disable POW interrupt */
880 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group
), 0);
884 /* Free the interrupt handler */
885 free_irq(OCTEON_IRQ_WORKQ0
+ pow_receive_group
, cvm_oct_device
);
887 atomic_inc_return(&cvm_oct_poll_queue_stopping
);
888 cancel_delayed_work_sync(&cvm_oct_rx_refill_work
);
890 cvm_oct_rx_shutdown();
891 cvm_oct_tx_shutdown();
895 /* Free the ethernet devices */
896 for (port
= 0; port
< TOTAL_NUMBER_OF_PORTS
; port
++) {
897 if (cvm_oct_device
[port
]) {
898 struct net_device
*dev
= cvm_oct_device
[port
];
899 struct octeon_ethernet
*priv
= netdev_priv(dev
);
901 cancel_delayed_work_sync(&priv
->port_periodic_work
);
903 cvm_oct_tx_shutdown_dev(dev
);
904 unregister_netdev(dev
);
906 cvm_oct_device
[port
] = NULL
;
910 destroy_workqueue(cvm_oct_poll_queue
);
916 /* Free the HW pools */
917 cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL
, CVMX_FPA_PACKET_POOL_SIZE
,
919 cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL
, CVMX_FPA_WQE_POOL_SIZE
,
921 if (CVMX_FPA_OUTPUT_BUFFER_POOL
!= CVMX_FPA_PACKET_POOL
)
922 cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL
,
923 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
, 128);
927 static const struct of_device_id cvm_oct_match
[] = {
929 .compatible
= "cavium,octeon-3860-pip",
933 MODULE_DEVICE_TABLE(of
, cvm_oct_match
);
935 static struct platform_driver cvm_oct_driver
= {
936 .probe
= cvm_oct_probe
,
937 .remove
= cvm_oct_remove
,
939 .name
= KBUILD_MODNAME
,
940 .of_match_table
= cvm_oct_match
,
944 module_platform_driver(cvm_oct_driver
);
946 MODULE_LICENSE("GPL");
947 MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
948 MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");