/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"

#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		    NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2

#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
	list_for_each_entry(inst_modpriv, \
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)

/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");

/* Helper functions - Get/Set */
static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = desc->buff_len;
	*buff = desc->buff;
	*ndesc = desc->next_desc;
}

static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
{
	*pad0 = desc->pad[0];
	*pad1 = desc->pad[1];
}

static void get_org_pkt_info(u32 *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = desc->orig_buff;
	*buff_len = desc->orig_len;
}

static void get_words(u32 *words, int num_words, u32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = desc[i];
}

static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = buff_len;
	desc->buff = buff;
	desc->next_desc = ndesc;
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = desc_info;
	desc->packet_info = pkt_info;
}

static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
{
	desc->pad[0] = pad0;
	desc->pad[1] = pad1;
}

static void set_org_pkt_info(u32 buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = buff;
	desc->orig_len = buff_len;
}

static void set_words(u32 *words, int num_words, u32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = words[i];
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}
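
/*
 * Illustrative example: with e-fuse words addr0 = 0x12345678 and
 * addr1 = 0x00009abc read in the default (no-swap) layout, the bytes
 * assembled above form the MAC address 9a:bc:12:34:56:78.  On K2E and
 * K2L the two e-fuse words are stored in the opposite order, so those
 * platforms pass NETCP_EFUSE_ADDR_SWAP and the words are re-read
 * swapped.
 */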

static const char *netcp_node_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}

static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name = netcp_node_name(child);

		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv) {
		of_node_put(child);
		return -ENOMEM;
	}

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	of_node_put(child);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered)
			ret = netcp_register_interface(netcp_intf);

		if (ret)
			return -ENODEV;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			return -ENOMEM;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}
	return 0;
}

int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}

	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);

static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}

void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};

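/*
 * Hook lists are kept sorted by ascending 'order': registration walks
 * the list and inserts the new entry just before the first entry with
 * a higher order, so modules see each packet in a well-defined
 * sequence.
 */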
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn = hook_rtn;
	entry->hook_data = hook_data;
	entry->order = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn = hook_rtn;
	entry->hook_data = hook_data;
	entry->order = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return -ENOENT;
}

static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}

static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		/* Return the chained descriptor itself, not the head desc */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			netcp->ndev->stats.rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		netcp->ndev->stats.rx_dropped++;
	}
}

static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_desc;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		get_pad_info((u32 *)&page, &tmp, ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
				(void *)dma_buff, buf_len, page);
			goto free_desc;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Remove ethernet FCS from the packet */
	__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	p_info.rxtstamp_complete = false;
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			netcp->ndev->stats.rx_errors++;
			dev_kfree_skb(skb);
			return 0;
		}
	}

	netcp->ndev->stats.rx_packets++;
	netcp->ndev->stats.rx_bytes += skb->len;

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	netcp->ndev->stats.rx_errors++;
	return 0;
}

static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}

/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;
	u32 tmp;

	/* Pop descriptors from the FDQ until it is empty */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		get_pad_info((u32 *)&buf_ptr, &tmp, desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}

static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 pad[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		if (primary_buf_len <= PAGE_SIZE) {
			bufptr = netdev_alloc_frag(primary_buf_len);
			pad[1] = primary_buf_len;
		} else {
			bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
					 GFP_DMA32 | __GFP_COLD);
			pad[1] = 0;
		}

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		pad[0] = (u32)bufptr;

	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		pad[0] = (u32)page;
		pad[1] = 0;
	}

	desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info = KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	set_pad_info(pad[0], pad[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	/* Push to FDQs */
	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
}
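
/*
 * FDQ 0 supplies the primary buffers that become skb heads (sized from
 * rx_buffer_sizes[0] plus the SOP offset); the remaining FDQs supply
 * whole pages used as skb fragments.  The two descriptor pad words
 * stash the buffer's virtual address and allocation size so the buffer
 * can be freed correctly when the descriptor comes back.
 */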

/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]--)
			netcp_allocate_rx_buf(netcp, i);
	} /* end for fdqs */
}

/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	if (packets < budget) {
		napi_complete(&netcp->rx_napi);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	netcp_rxpool_refill(netcp);
	return packets;
}

static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}

static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
				 (void *)dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}

static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct knav_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;
	u32 tmp;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		get_pad_info((u32 *)&skb, &tmp, desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		netcp->ndev->stats.tx_packets++;
		netcp->ndev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}

static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}

static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}

static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(!dma_addr)) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (unlikely(IS_ERR_OR_NULL(desc))) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 pkt_info;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (unlikely(IS_ERR_OR_NULL(ndesc))) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
						      (void *)ndesc);
		pkt_info =
			(netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
				KNAV_DMA_DESC_RETQ_SHIFT;
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		set_words(&desc_dma, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
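
/*
 * The mapped packet is a chain of host descriptors: the head descriptor
 * covers the linear part of the skb and each page fragment gets its own
 * descriptor, linked through next_desc.  Only the head is returned;
 * netcp_tx_submit_skb() below fills in the EPIB/psdata words and pushes
 * the chain to the selected TX pipe.
 */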

static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp_complete = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	/* update descriptor */
	if (p_info.psdata_len) {
		u32 *psdata = p_info.psdata;

		memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
			p_info.psdata_len);
		set_words(psdata, p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
	       ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
	       KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	set_words((u32 *)&skb, 1, &desc->pad[0]);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words((u32 *)&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}

/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			ndev->stats.tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	ndev->trans_start = jiffies;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	ndev->stats.tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}

int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);

int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	u8 name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
				tx_pipe->dma_chan_name, &config);
	if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = -ENODEV;
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		ret = PTR_ERR(tx_pipe->dma_queue);
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
			name, ret);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);
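
/*
 * Typical module-side usage of the TX pipe API (a sketch; names and
 * values below are illustrative, not taken from this file):
 *
 *	netcp_txpipe_init(&priv->tx_pipe, netcp_device,
 *			  "nettx0", tx_queue_id);
 *	netcp_txpipe_open(&priv->tx_pipe);
 *
 * The module's TX hook then sets p_info->tx_pipe so that
 * netcp_tx_submit_skb() routes packets to that pipe.
 */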

static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}

static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}

static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		mutex_lock(&netcp_modules_lock);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		mutex_unlock(&netcp_modules_lock);
		netcp_addr_del(netcp, naddr);
	}
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);
		mutex_lock(&netcp_modules_lock);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
		mutex_unlock(&netcp_modules_lock);
	}
}

static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
}
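
/*
 * Address filters are synchronized mark-and-sweep style: clear every
 * mark, re-mark (or add) everything the stack currently wants, then
 * delete the entries left unmarked and push newly added ones out to
 * the modules.
 */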

static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}

static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	u8 name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR_OR_NULL(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction = DMA_DEV_TO_MEM;
	config.u.rx.einfo_present = true;
	config.u.rx.psinfo_present = true;
	config.u.rx.err_mode = DMA_DROP;
	config.u.rx.desc_type = DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop = false;
	config.u.rx.sop_offset = NETCP_SOP_OFFSET;
	config.u.rx.dst_q = netcp->rx_queue_id;
	config.u.rx.thresh = DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
					netcp->dma_chan_name, &config);
	if (IS_ERR_OR_NULL(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = -ENODEV;
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}
	mutex_unlock(&netcp_modules_lock);

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}
	mutex_unlock(&netcp_modules_lock);

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}

static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	mutex_unlock(&netcp_modules_lock);
	return (ret == 0) ? 0 : err;
}

static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	/* MTU < 68 is an error for IPv4 traffic */
	if ((new_mtu < 68) ||
	    (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
		dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
		return -EINVAL;
	}

	ndev->mtu = new_mtu;
	return 0;
}

static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	ndev->trans_start = jiffies;
	netif_tx_wake_all_queues(ndev);
}

static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);
	return err;
}

static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);
	return err;
}

static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}

static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
{
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}

static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl		= netcp_ndo_ioctl,
	.ndo_change_mtu		= netcp_ndo_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= netcp_select_queue,
	.ndo_setup_tc		= netcp_setup_tc,
};

static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			random_ether_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			random_ether_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
					 netcp->rx_buffer_sizes,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret) {
		dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
		netcp->rx_buffer_sizes[0] = 1536;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least (%ld)\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id = 0;
	ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
	ndev->netdev_ops = &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}
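
/*
 * Illustrative device-tree fragment for one interface, matching the
 * properties parsed above (the values are examples only, not taken
 * from a real board file):
 *
 *	netcp-interfaces {
 *		interface-0 {
 *			rx-channel = "netrx0";
 *			rx-pool = <1024 12>;
 *			tx-pool = <1024 12>;
 *			rx-queue-depth = <128 128 0 0>;
 *			rx-buffer-size = <1518 4096 0 0>;
 *			rx-queue = <8704>;
 *			tx-completion-queue = <8706>;
 *		};
 *	};
 */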

static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
		kfree(intf_modpriv);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	netif_napi_del(&netcp->rx_napi);
	free_netdev(ndev);
}

static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	struct netcp_module *module;
	int ret;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%s)\n",
				child->name);
			goto probe_quit_interface;
		}
	}

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	/* Probe & attach any modules already registered */
	mutex_lock(&netcp_modules_lock);
	for_each_netcp_module(module) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			dev_err(dev, "module(%s) probe failed\n", module->name);
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
		kfree(inst_modpriv);
	}
	WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
	     pdev->name);

	devm_kfree(&pdev->dev, netcp_device);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");