/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Ravi Patel <rapatel@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"

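/* Queue layout used throughout this file: one TX descriptor ring, one RX
 * descriptor ring that also receives TX-completion messages, and a buffer
 * pool ring of 16-byte descriptors that supplies free receive buffers to
 * the hardware.  Software reports produced or consumed descriptors by
 * writing a signed count to each ring's command register (ring->cmd).
 */
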
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct xgene_enet_raw_desc16 *raw_desc;
        int i;

        for (i = 0; i < buf_pool->slots; i++) {
                raw_desc = &buf_pool->raw_desc16[i];

                /* Hardware expects descriptor in little endian format */
                raw_desc->m0 = cpu_to_le64(i |
                                SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
                                SET_VAL(STASH, 3));
        }
}

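/* Each free-pool descriptor carries its own slot index in m0; the hardware
 * echoes it back as USERINFO on completion, which is how rx_frame() and
 * delete_bufpool() map a descriptor back to its buf_pool->rx_skb[] entry.
 */
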
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
                                     u32 nbuf)
{
        struct sk_buff *skb;
        struct xgene_enet_raw_desc16 *raw_desc;
        struct net_device *ndev;
        struct device *dev;
        dma_addr_t dma_addr;
        u32 tail = buf_pool->tail;
        u32 slots = buf_pool->slots - 1;
        u16 bufdatalen, len;
        int i;

        ndev = buf_pool->ndev;
        dev = ndev_to_dev(buf_pool->ndev);
        bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
        len = XGENE_ENET_MAX_MTU;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &buf_pool->raw_desc16[tail];

                skb = netdev_alloc_skb_ip_align(ndev, len);
                if (unlikely(!skb))
                        return -ENOMEM;
                buf_pool->rx_skb[tail] = skb;

                dma_addr = dma_map_single(dev, skb->data, len,
                                          DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }

                raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                           SET_VAL(BUFDATALEN, bufdatalen) |
                                           SET_BIT(COHERENT));
                tail = (tail + 1) & slots;
        }

        iowrite32(nbuf, buf_pool->cmd);
        buf_pool->tail = tail;

        return 0;
}

static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

        return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
        const struct ethhdr *eth = data;

        return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
        u32 __iomem *cmd_base = ring->cmd_base;
        u32 ring_state, num_msgs;

        ring_state = ioread32(&cmd_base[1]);
        num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);

        return num_msgs >> NUMMSGSINQ_POS;
}

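/* Ring occupancy is read back from the second word of the ring's command
 * area; xgene_enet_start_xmit() and xgene_enet_process_ring() use it as a
 * cheap fill-level check instead of tracking head/tail across CPU and MAC.
 */
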
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct xgene_enet_raw_desc16 *raw_desc;
        u32 slots = buf_pool->slots - 1;
        u32 tail = buf_pool->tail;
        u32 userinfo;
        int i, len;

        len = xgene_enet_ring_len(buf_pool);
        for (i = 0; i < len; i++) {
                tail = (tail - 1) & slots;
                raw_desc = &buf_pool->raw_desc16[tail];

                /* Hardware stores descriptor in little endian format */
                userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
                dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
        }

        iowrite32(-len, buf_pool->cmd);
        buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
        struct xgene_enet_desc_ring *rx_ring = data;

        if (napi_schedule_prep(&rx_ring->napi)) {
                disable_irq_nosync(irq);
                __napi_schedule(&rx_ring->napi);
        }

        return IRQ_HANDLED;
}

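/* Classic NAPI hand-off: the hard IRQ only disables itself and schedules
 * the poller; xgene_enet_napi() re-enables the line with enable_irq() once
 * a poll consumes less than its budget.  Masking is done at the interrupt
 * controller (disable_irq_nosync) rather than at the NIC.
 */
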
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
                                    struct xgene_enet_raw_desc *raw_desc)
{
        struct sk_buff *skb;
        struct device *dev;
        u16 skb_index;
        u8 status;
        int ret = 0;

        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = cp_ring->cp_skb[skb_index];

        dev = ndev_to_dev(cp_ring->ndev);
        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
                         DMA_TO_DEVICE);

        /* Checking for error */
        status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
                                       status);
                ret = -EIO;
        }

        if (likely(skb)) {
                dev_kfree_skb_any(skb);
        } else {
                netdev_err(cp_ring->ndev, "completion skb is NULL\n");
                ret = -EIO;
        }

        return ret;
}

static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
        struct iphdr *iph;
        u8 l3hlen, l4hlen = 0;
        u8 csum_enable = 0;
        u8 proto = 0;
        u8 ethhdr;
        u64 hopinfo;

        if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
            unlikely(skb->protocol != htons(ETH_P_8021Q)))
                goto out;

        if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
                goto out;

        iph = ip_hdr(skb);
        if (unlikely(ip_is_fragment(iph)))
                goto out;

        if (likely(iph->protocol == IPPROTO_TCP)) {
                l4hlen = tcp_hdrlen(skb) >> 2;
                csum_enable = 1;
                proto = TSO_IPPROTO_TCP;
        } else if (iph->protocol == IPPROTO_UDP) {
                l4hlen = UDP_HDR_SIZE;
                csum_enable = 1;
        }
out:
        l3hlen = ip_hdrlen(skb) >> 2;
        ethhdr = xgene_enet_hdr_len(skb->data);
        hopinfo = SET_VAL(TCPHDR, l4hlen) |
                  SET_VAL(IPHDR, l3hlen) |
                  SET_VAL(ETHHDR, ethhdr) |
                  SET_VAL(EC, csum_enable) |
                  SET_VAL(IS, proto) |
                  SET_BIT(IC) |
                  SET_BIT(TYPE_ETH_WORK_MESSAGE);

        return hopinfo;
}

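/* The hopinfo word built above is the hardware "work message": header
 * lengths are encoded in 32-bit words (hence the ">> 2" on the IP and TCP
 * header sizes), EC enables checksum offload for this packet, and
 * TYPE_ETH_WORK_MESSAGE marks the descriptor as an Ethernet work request.
 */
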
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
                                    struct sk_buff *skb)
{
        struct device *dev = ndev_to_dev(tx_ring->ndev);
        struct xgene_enet_raw_desc *raw_desc;
        dma_addr_t dma_addr;
        u16 tail = tx_ring->tail;
        u64 hopinfo;

        raw_desc = &tx_ring->raw_desc[tail];
        memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

        dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                netdev_err(tx_ring->ndev, "DMA mapping error\n");
                return -EINVAL;
        }

        /* Hardware expects descriptor in little endian format */
        raw_desc->m0 = cpu_to_le64(tail);
        raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                   SET_VAL(BUFDATALEN, skb->len) |
                                   SET_BIT(COHERENT));
        hopinfo = xgene_enet_work_msg(skb);
        raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
                                   hopinfo);
        tx_ring->cp_ring->cp_skb[tail] = skb;

        return 0;
}

static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
                                         struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
        struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
        u32 tx_level, cq_level;

        tx_level = xgene_enet_ring_len(tx_ring);
        cq_level = xgene_enet_ring_len(cp_ring);
        if (unlikely(tx_level > pdata->tx_qcnt_hi ||
                     cq_level > pdata->cp_qcnt_hi)) {
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        iowrite32(1, tx_ring->cmd);
        skb_tx_timestamp(skb);
        tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

        pdata->stats.tx_packets++;
        pdata->stats.tx_bytes += skb->len;

        return NETDEV_TX_OK;
}

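/* Flow control is threshold based: the queue is stopped once either the TX
 * ring or the completion ring is more than half full (tx_qcnt_hi and
 * cp_qcnt_hi are set to slots / 2 in xgene_enet_create_desc_rings()), and
 * xgene_enet_process_ring() wakes it again once below cp_qcnt_low.
 */
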
static void xgene_enet_skip_csum(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        if (!ip_is_fragment(iph) ||
            (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
                               struct xgene_enet_raw_desc *raw_desc)
{
        struct net_device *ndev;
        struct xgene_enet_pdata *pdata;
        struct device *dev;
        struct xgene_enet_desc_ring *buf_pool;
        u32 datalen, skb_index;
        struct sk_buff *skb;
        u8 status;
        int ret = 0;

        ndev = rx_ring->ndev;
        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(rx_ring->ndev);
        buf_pool = rx_ring->buf_pool;

        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = buf_pool->rx_skb[skb_index];

        /* checking for error */
        status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                dev_kfree_skb_any(skb);
                xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
                                       status);
                pdata->stats.rx_dropped++;
                ret = -EIO;
                goto out;
        }

        /* strip off CRC as HW isn't doing this */
        datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
        datalen -= 4;
        prefetch(skb->data - NET_IP_ALIGN);
        skb_put(skb, datalen);

        skb_checksum_none_assert(skb);
        skb->protocol = eth_type_trans(skb, ndev);
        if (likely((ndev->features & NETIF_F_IP_CSUM) &&
                   skb->protocol == htons(ETH_P_IP))) {
                xgene_enet_skip_csum(skb);
        }

        pdata->stats.rx_packets++;
        pdata->stats.rx_bytes += datalen;
        napi_gro_receive(&rx_ring->napi, skb);
out:
        if (--rx_ring->nbufpool == 0) {
                ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
                rx_ring->nbufpool = NUM_BUFPOOL;
        }

        return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
        return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

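/* RX frames and TX completions arrive interleaved on the same CPU-owned
 * ring; a non-zero FPQNUM (free-pool queue number) in m0 means the message
 * references a buffer-pool descriptor, i.e. it is a received frame.
 */
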
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
                                   int budget)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
        struct xgene_enet_raw_desc *raw_desc;
        u16 head = ring->head;
        u16 slots = ring->slots - 1;
        int ret, count = 0;

        do {
                raw_desc = &ring->raw_desc[head];
                if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
                        break;

                if (is_rx_desc(raw_desc))
                        ret = xgene_enet_rx_frame(ring, raw_desc);
                else
                        ret = xgene_enet_tx_completion(ring, raw_desc);
                xgene_enet_mark_desc_slot_empty(raw_desc);

                head = (head + 1) & slots;
                count++;

                if (ret)
                        break;
        } while (--budget);

        if (likely(count)) {
                iowrite32(-count, ring->cmd);
                ring->head = head;

                if (netif_queue_stopped(ring->ndev)) {
                        if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
                                netif_wake_queue(ring->ndev);
                }
        }

        return count;
}

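/* head is advanced purely in software, while the negative doorbell write
 * (-count) returns the processed slots to the ring manager; processing
 * stops early on an empty slot, on a per-descriptor error, or when the
 * NAPI budget is exhausted.
 */
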
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
        struct xgene_enet_desc_ring *ring;
        int processed;

        ring = container_of(napi, struct xgene_enet_desc_ring, napi);
        processed = xgene_enet_process_ring(ring, budget);

        if (processed != budget) {
                napi_complete(napi);
                enable_irq(ring->irq);
        }

        return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);

        pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        int ret;

        ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
                               IRQF_SHARED, ndev->name, pdata->rx_ring);
        if (ret) {
                netdev_err(ndev, "rx%d interrupt request failed\n",
                           pdata->rx_ring->irq);
        }

        return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);
        devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
}

static int xgene_enet_open(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_mac_ops *mac_ops = pdata->mac_ops;
        int ret;

        mac_ops->tx_enable(pdata);
        mac_ops->rx_enable(pdata);

        ret = xgene_enet_register_irq(ndev);
        if (ret)
                return ret;
        napi_enable(&pdata->rx_ring->napi);

        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                phy_start(pdata->phy_dev);
        else
                schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

        netif_start_queue(ndev);

        return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_mac_ops *mac_ops = pdata->mac_ops;

        netif_stop_queue(ndev);

        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                phy_stop(pdata->phy_dev);
        else
                cancel_delayed_work_sync(&pdata->link_work);

        napi_disable(&pdata->rx_ring->napi);
        xgene_enet_free_irq(ndev);
        xgene_enet_process_ring(pdata->rx_ring, -1);

        mac_ops->tx_disable(pdata);
        mac_ops->rx_disable(pdata);

        return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        pdata = netdev_priv(ring->ndev);
        dev = ndev_to_dev(ring->ndev);

        xgene_enet_clear_ring(ring);
        dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_desc_ring *buf_pool;

        if (pdata->tx_ring) {
                xgene_enet_delete_ring(pdata->tx_ring);
                pdata->tx_ring = NULL;
        }

        if (pdata->rx_ring) {
                buf_pool = pdata->rx_ring->buf_pool;
                xgene_enet_delete_bufpool(buf_pool);
                xgene_enet_delete_ring(buf_pool);
                xgene_enet_delete_ring(pdata->rx_ring);
                pdata->rx_ring = NULL;
        }
}

static int xgene_enet_get_ring_size(struct device *dev,
                                    enum xgene_enet_ring_cfgsize cfgsize)
{
        int size = -EINVAL;

        switch (cfgsize) {
        case RING_CFGSIZE_512B:
                size = 0x200;
                break;
        case RING_CFGSIZE_2KB:
                size = 0x800;
                break;
        case RING_CFGSIZE_16KB:
                size = 0x4000;
                break;
        case RING_CFGSIZE_64KB:
                size = 0x10000;
                break;
        case RING_CFGSIZE_512KB:
                size = 0x80000;
                break;
        default:
                dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
                break;
        }

        return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
        struct device *dev;

        if (!ring)
                return;

        dev = ndev_to_dev(ring->ndev);

        if (ring->desc_addr) {
                xgene_enet_clear_ring(ring);
                dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
        }
        devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        struct xgene_enet_desc_ring *ring;

        ring = pdata->tx_ring;
        if (ring) {
                if (ring->cp_ring && ring->cp_ring->cp_skb)
                        devm_kfree(dev, ring->cp_ring->cp_skb);
                xgene_enet_free_desc_ring(ring);
        }

        ring = pdata->rx_ring;
        if (ring) {
                if (ring->buf_pool) {
                        if (ring->buf_pool->rx_skb)
                                devm_kfree(dev, ring->buf_pool->rx_skb);
                        xgene_enet_free_desc_ring(ring->buf_pool);
                }
                xgene_enet_free_desc_ring(ring);
        }
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
                        struct net_device *ndev, u32 ring_num,
                        enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
        struct xgene_enet_desc_ring *ring;
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        int size;

        size = xgene_enet_get_ring_size(dev, cfgsize);
        if (size < 0)
                return NULL;

        ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
                            GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ndev = ndev;
        ring->num = ring_num;
        ring->cfgsize = cfgsize;
        ring->id = ring_id;

        ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
                                              GFP_KERNEL);
        if (!ring->desc_addr) {
                devm_kfree(dev, ring);
                return NULL;
        }
        ring->size = size;

        ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
        ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
        ring = xgene_enet_setup_ring(ring);
        netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
                   ring->num, ring->size, ring->id, ring->slots);

        return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
        return (owner << 6) | (bufnum & GENMASK(5, 0));
}

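/* A ring id encodes its owner (the CPU or an Ethernet port) in the upper
 * bits and a 6-bit buffer number in the low bits, so each owner can expose
 * up to 64 distinct rings to the ring manager.
 */
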
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
        u8 cpu_bufnum = 0, eth_bufnum = 0;
        u8 bp_bufnum = 0x20;
        u16 ring_id, ring_num = 0;
        int ret;

        /* allocate rx descriptor ring */
        ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
        rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                              RING_CFGSIZE_16KB, ring_id);
        if (!rx_ring) {
                ret = -ENOMEM;
                goto err;
        }

        /* allocate buffer pool for receiving packets */
        ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
        buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
                                               RING_CFGSIZE_2KB, ring_id);
        if (!buf_pool) {
                ret = -ENOMEM;
                goto err;
        }

        rx_ring->nbufpool = NUM_BUFPOOL;
        rx_ring->buf_pool = buf_pool;
        rx_ring->irq = pdata->rx_irq;
        buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
                                        sizeof(struct sk_buff *), GFP_KERNEL);
        if (!buf_pool->rx_skb) {
                ret = -ENOMEM;
                goto err;
        }

        buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
        rx_ring->buf_pool = buf_pool;
        pdata->rx_ring = rx_ring;

        /* allocate tx descriptor ring */
        ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
        tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                              RING_CFGSIZE_16KB, ring_id);
        if (!tx_ring) {
                ret = -ENOMEM;
                goto err;
        }
        pdata->tx_ring = tx_ring;

        cp_ring = pdata->rx_ring;
        cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
                                       sizeof(struct sk_buff *), GFP_KERNEL);
        if (!cp_ring->cp_skb) {
                ret = -ENOMEM;
                goto err;
        }
        pdata->tx_ring->cp_ring = cp_ring;
        pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

        pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
        pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
        pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

        return 0;

err:
        xgene_enet_free_desc_rings(pdata);
        return ret;
}

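/* Note the topology set up above: the TX ring's completion ring (cp_ring)
 * is the RX ring itself, so one IRQ and one NAPI context service both
 * receive traffic and transmit completions.
 */
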
static struct rtnl_link_stats64 *xgene_enet_get_stats64(
                        struct net_device *ndev,
                        struct rtnl_link_stats64 *storage)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct rtnl_link_stats64 *stats = &pdata->stats;

        stats->rx_errors += stats->rx_length_errors +
                            stats->rx_crc_errors +
                            stats->rx_frame_errors +
                            stats->rx_fifo_errors;
        memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

        return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = eth_mac_addr(ndev, addr);
        if (ret)
                return ret;
        pdata->mac_ops->set_mac_addr(pdata);

        return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
        .ndo_open = xgene_enet_open,
        .ndo_stop = xgene_enet_close,
        .ndo_start_xmit = xgene_enet_start_xmit,
        .ndo_tx_timeout = xgene_enet_timeout,
        .ndo_get_stats64 = xgene_enet_get_stats64,
        .ndo_change_mtu = eth_change_mtu,
        .ndo_set_mac_address = xgene_enet_set_mac_address,
};

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
        struct platform_device *pdev;
        struct net_device *ndev;
        struct device *dev;
        struct resource *res;
        void __iomem *base_addr;
        const char *mac;
        int ret;

        pdev = pdata->pdev;
        dev = &pdev->dev;
        ndev = pdata->ndev;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
        if (!res) {
                dev_err(dev, "Resource enet_csr not defined\n");
                return -ENODEV;
        }
        pdata->base_addr = devm_ioremap_resource(dev, res);
        if (IS_ERR(pdata->base_addr)) {
                dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
                return PTR_ERR(pdata->base_addr);
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
        if (!res) {
                dev_err(dev, "Resource ring_csr not defined\n");
                return -ENODEV;
        }
        pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
        if (IS_ERR(pdata->ring_csr_addr)) {
                dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
                return PTR_ERR(pdata->ring_csr_addr);
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
        if (!res) {
                dev_err(dev, "Resource ring_cmd not defined\n");
                return -ENODEV;
        }
        pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
        if (IS_ERR(pdata->ring_cmd_addr)) {
                dev_err(dev, "Unable to retrieve ENET Ring command region\n");
                return PTR_ERR(pdata->ring_cmd_addr);
        }

        ret = platform_get_irq(pdev, 0);
        if (ret <= 0) {
                dev_err(dev, "Unable to get ENET Rx IRQ\n");
                ret = ret ? : -ENXIO;
                return ret;
        }
        pdata->rx_irq = ret;

        mac = of_get_mac_address(dev->of_node);
        if (mac)
                memcpy(ndev->dev_addr, mac, ndev->addr_len);
        else
                eth_hw_addr_random(ndev);
        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

        pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
        if (pdata->phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return pdata->phy_mode;
        }
        if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
            pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
                dev_err(dev, "Incorrect phy-connection-type specified\n");
                return -ENODEV;
        }

        pdata->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pdata->clk)) {
                dev_err(&pdev->dev, "can't get clock\n");
                return PTR_ERR(pdata->clk);
        }

        base_addr = pdata->base_addr;
        pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
        pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
        pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
                pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
        } else {
                pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
        }
        pdata->rx_buff_cnt = NUM_PKT_BUF;

        return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
        struct net_device *ndev = pdata->ndev;
        struct xgene_enet_desc_ring *buf_pool;
        u16 dst_ring_num;
        int ret;

        pdata->port_ops->reset(pdata);

        ret = xgene_enet_create_desc_rings(ndev);
        if (ret) {
                netdev_err(ndev, "Error in ring configuration\n");
                return ret;
        }

        /* setup buffer pool */
        buf_pool = pdata->rx_ring->buf_pool;
        xgene_enet_init_bufpool(buf_pool);
        ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
        if (ret) {
                xgene_enet_delete_desc_rings(pdata);
                return ret;
        }

        dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
        pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
        pdata->mac_ops->init(pdata);

        return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
        switch (pdata->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
                pdata->mac_ops = &xgene_gmac_ops;
                pdata->port_ops = &xgene_gport_ops;
                break;
        default:
                pdata->mac_ops = &xgene_xgmac_ops;
                pdata->port_ops = &xgene_xgport_ops;
                break;
        }
}

static int xgene_enet_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct xgene_enet_pdata *pdata;
        struct device *dev = &pdev->dev;
        struct napi_struct *napi;
        struct xgene_mac_ops *mac_ops;
        int ret;

        ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
        if (!ndev)
                return -ENOMEM;

        pdata = netdev_priv(ndev);

        pdata->pdev = pdev;
        pdata->ndev = ndev;
        SET_NETDEV_DEV(ndev, dev);
        platform_set_drvdata(pdev, pdata);
        ndev->netdev_ops = &xgene_ndev_ops;
        xgene_enet_set_ethtool_ops(ndev);
        ndev->features |= NETIF_F_IP_CSUM |
                          NETIF_F_GSO |
                          NETIF_F_GRO;

        ret = xgene_enet_get_resources(pdata);
        if (ret)
                goto err;

        xgene_enet_setup_ops(pdata);

        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
                goto err;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
        }

        ret = xgene_enet_init_hw(pdata);
        if (ret)
                goto err;

        napi = &pdata->rx_ring->napi;
        netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
        mac_ops = pdata->mac_ops;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                ret = xgene_enet_mdio_config(pdata);
        else
                INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

        return ret;
err:
        free_netdev(ndev);
        return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
        struct xgene_enet_pdata *pdata;
        struct xgene_mac_ops *mac_ops;
        struct net_device *ndev;

        pdata = platform_get_drvdata(pdev);
        mac_ops = pdata->mac_ops;
        ndev = pdata->ndev;

        mac_ops->rx_disable(pdata);
        mac_ops->tx_disable(pdata);

        netif_napi_del(&pdata->rx_ring->napi);
        xgene_enet_mdio_remove(pdata);
        xgene_enet_delete_desc_rings(pdata);
        unregister_netdev(ndev);
        pdata->port_ops->shutdown(pdata);
        free_netdev(ndev);

        return 0;
}

static struct of_device_id xgene_enet_match[] = {
        {.compatible = "apm,xgene-enet",},
        {},
};

MODULE_DEVICE_TABLE(of, xgene_enet_match);

static struct platform_driver xgene_enet_driver = {
        .driver = {
                .name = "xgene-enet",
                .of_match_table = xgene_enet_match,
        },
        .probe = xgene_enet_probe,
        .remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");