drivers: net: xgene: Pre-initialize ret in xgene_enet_get_resources()
drivers/net/ethernet/apm/xgene/xgene_enet_main.c

/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];
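/* Stamp each 16-byte free-pool descriptor with its own slot index (read back
 * later through the USERINFO field to locate the matching rx_skb entry), the
 * destination free-pool queue number, and a stash setting of 3.
 */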
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

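/* Replenish the free pool with nbuf fresh receive buffers: allocate an skb,
 * DMA-map it for device writes, and publish its address and length in the
 * descriptor at the ring tail before kicking the ring with one write command.
 */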
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

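/* A destination ring is addressed as a 16-bit value: the ring-manager number
 * in the upper bits (shifted by 10) and the ring number in the low bits.
 */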
static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	u16 skb_index;
	u8 status;
	int ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
			 DMA_TO_DEVICE);

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

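/* Build the 64-bit "work message" word for a TX descriptor: IP and L4 header
 * lengths in 32-bit words, the L4 protocol for TSO, and the checksum-enable
 * bit. Checksum offload is only requested for non-fragmented IPv4 TCP/UDP
 * frames when NETIF_F_IP_CSUM is set.
 */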
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 l3hlen, l4hlen = 0;
	u8 csum_enable = 0;
	u8 proto = 0;
	u8 ethhdr;
	u64 hopinfo;

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	ethhdr = xgene_enet_hdr_len(skb->data);
	hopinfo = SET_VAL(TCPHDR, l4hlen) |
		  SET_VAL(IPHDR, l3hlen) |
		  SET_VAL(ETHHDR, ethhdr) |
		  SET_VAL(EC, csum_enable) |
		  SET_VAL(IS, proto) |
		  SET_BIT(IC) |
		  SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u16 tail = tx_ring->tail;
	u64 hopinfo;

	raw_desc = &tx_ring->raw_desc[tail];
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m0 = cpu_to_le64(tail);
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, skb->len) |
				   SET_BIT(COHERENT));
	hopinfo = xgene_enet_work_msg(skb);
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);
	tx_ring->cp_ring->cp_skb[tail] = skb;

	return 0;
}

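/* Transmit path: back-pressure the stack with NETDEV_TX_BUSY when either the
 * TX ring or its completion ring crosses the high watermark; otherwise queue
 * one descriptor and ring the doorbell. The queue is woken again from
 * xgene_enet_process_ring() once the ring drains below the low watermark.
 */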
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;

	tx_level = pdata->ring_ops->len(tx_ring);
	cq_level = pdata->ring_ops->len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pdata->ring_ops->wr_cmd(tx_ring, 1);
	skb_tx_timestamp(skb);
	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

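/* Receive one frame: unmap the buffer the hardware filled, look up the skb
 * via the USERINFO index, drop errored frames (LERR status > 2), strip the
 * 4-byte CRC the MAC leaves in place, and hand the skb to GRO. The buffer
 * pool is topped up again every NUM_BUFPOOL frames.
 */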
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen -= 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

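/* A completion ring may carry both RX frames and TX completions; a non-zero
 * free-pool queue number in the descriptor identifies an RX frame. Consumed
 * slots are returned to the hardware in one batch via wr_cmd(ring, -count).
 */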
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);

		head = (head + 1) & slots;
		count++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return count;
}

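/* NAPI poll: the hard IRQ was disabled in xgene_enet_rx_irq(), so it is only
 * re-enabled once a poll consumes less than the full budget and completes.
 */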
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret;

	ring = pdata->rx_ring;
	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ring->irq_name, ring);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);

	if (pdata->cq_cnt) {
		ring = pdata->tx_ring->cp_ring;
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);

	if (pdata->cq_cnt) {
		devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
			      pdata->tx_ring->cp_ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_enable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_disable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_disable(napi);
	}
}

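/* ndo_open: enable the MAC, hook up IRQs and NAPI, then either start the
 * attached PHY (RGMII) or poll link state from a delayed work (SGMII/XGMII,
 * which have no MDIO-managed PHY in this driver).
 */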
static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	xgene_enet_napi_enable(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_carrier_off(ndev);
	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_napi_disable(pdata);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

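/* Translate the ring configuration size enum into a size in bytes; returns
 * -EINVAL for configurations this driver does not support.
 */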
static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

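/* Allocate one descriptor ring: the ring state itself (devm), the coherent
 * DMA region backing the descriptors and, on second-generation hardware for
 * CPU-owned rings, an interrupt mailbox. Returns NULL and unwinds its own
 * allocations on failure.
 */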
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
				&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

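/* Create the full ring set for one port: an RX ring with its 2KB buffer
 * pool, a TX ring, and a TX completion ring (shared with the RX ring when no
 * dedicated completion IRQ is available, i.e. cq_cnt == 0). The watermarks
 * used by the transmit path are derived from the ring sizes here.
 */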
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int ret;

	/* allocate rx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	if (!pdata->cq_cnt) {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			 ndev->name);
	} else {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
	}
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring = tx_ring;

	if (!pdata->cq_cnt) {
		cp_ring = pdata->rx_ring;
	} else {
		/* allocate tx completion descriptor ring */
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!cp_ring) {
			ret = -ENOMEM;
			goto err;
		}
		cp_ring->irq = pdata->txc_irq;
		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
	}

	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static int xgene_get_port_id_acpi(struct device *dev,
				  struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status))
		pdata->port_id = 0;
	else
		pdata->port_id = temp;

	return 0;
}
#endif

static int xgene_get_port_id_dt(struct device *dev,
				struct xgene_enet_pdata *pdata)
{
	u32 id = 0;
	int ret;

	ret = of_property_read_u32(dev->of_node, "port-id", &id);
	if (ret) {
		pdata->port_id = 0;
		ret = 0;
	} else {
		pdata->port_id = id & BIT(0);
	}

	return ret;
}

static int xgene_get_mac_address(struct device *dev,
				 unsigned char *addr)
{
	int ret;

	ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
	if (ret)
		ret = device_property_read_u8_array(dev, "mac-address",
						    addr, 6);
	if (ret)
		return -ENODEV;

	return ETH_ALEN;
}

static int xgene_get_phy_mode(struct device *dev)
{
	int i, ret;
	char *modestr;

	ret = device_property_read_string(dev, "phy-connection-type",
					  (const char **)&modestr);
	if (ret)
		ret = device_property_read_string(dev, "phy-mode",
						  (const char **)&modestr);
	if (ret)
		return -ENODEV;

	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
		if (!strcasecmp(modestr, phy_modes(i)))
			return i;
	}
	return -ENODEV;
}

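/* Map the three CSR resources, then read port id, MAC address, PHY mode and
 * IRQs. Note that ret is pre-initialized to 0: on a kernel built without
 * CONFIG_ACPI and probing a device with no of_node, neither port-id helper
 * runs, so ret would otherwise be read uninitialized in the check that
 * follows (the fix carried by this commit).
 */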
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		ret = xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		ret = xgene_get_port_id_acpi(dev, pdata);
#endif
	if (ret)
		return ret;

	if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = xgene_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
		ret = platform_get_irq(pdev, 1);
		if (ret <= 0) {
			pdata->cq_cnt = 0;
			dev_info(dev, "Unable to get Tx completion IRQ, using Rx IRQ instead\n");
		} else {
			pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
			pdata->txc_irq = ret;
		}
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}

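/* Select the MAC, port and ring-manager ops for this port based on the PHY
 * interface mode (GMAC for RGMII, SGMAC for SGMII, XGMAC otherwise), and pick
 * the per-port buffer/ring number bases, which differ between first- and
 * second-generation ENET hardware.
 */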
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
			pdata->eth_bufnum = START_ETH_BUFNUM_0;
			pdata->bp_bufnum = START_BP_BUFNUM_0;
			pdata->ring_num = START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = START_CPU_BUFNUM_1;
			pdata->eth_bufnum = START_ETH_BUFNUM_1;
			pdata->bp_bufnum = START_BP_BUFNUM_1;
			pdata->ring_num = START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_del(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_del(napi);
	}
}

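/* Probe: the SoC generation (XGENE_ENET1/2) is taken from the OF or ACPI
 * match data, resources are mapped, the netdev is registered, and the
 * hardware is initialized. RGMII ports get an MDIO bus; the other modes poll
 * link state through the mac_ops->link_state delayed work.
 */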
static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		free_netdev(ndev);
		return -ENODEV;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	xgene_enet_napi_add(pdata);
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	return ret;
err:
	unregister_netdev(ndev);
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	xgene_enet_mdio_remove(pdata);
	xgene_enet_delete_desc_rings(pdata);
	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");