staging: cxt1e1: Fix "do not use C99 // comments" checkpatch errors
[deliverable/linux.git] / drivers/staging/cxt1e1/linux.c
1 /* Copyright (C) 2007-2008 One Stop Systems
2 * Copyright (C) 2003-2006 SBE, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/types.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/hdlc.h>
21 #include <linux/if_arp.h>
22 #include <linux/init.h>
23 #include <linux/uaccess.h>
24 #include <linux/rtnetlink.h>
25 #include <linux/skbuff.h>
26 #include "pmcc4_sysdep.h"
27 #include "sbecom_inline_linux.h"
28 #include "libsbew.h"
29 #include "pmcc4.h"
30 #include "pmcc4_ioctls.h"
31 #include "pmcc4_private.h"
32 #include "sbeproc.h"
33
34 /*******************************************************************************
35 * Error out early if we have compiler trouble.
36 *
37 * (This section is included from the kernel's init/main.c as a friendly
38 * spiderman recommendation...)
39 *
40 * Versions of gcc older than that listed below may actually compile and link
41 * okay, but the end product can have subtle run time bugs. To avoid associated
42 * bogus bug reports, we flatly refuse to compile with a gcc that is known to be
43 * too old from the very beginning.
44 */
45 #if (__GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 2)
46 #error Sorry, your GCC is too old. It builds incorrect kernels.
47 #endif
48
49 #if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0
50 #warning gcc-4.1.0 is known to miscompile the kernel. A different compiler version is recommended.
51 #endif
52
53 /*******************************************************************************/
54
55 #define CHANNAME "hdlc"
56
57 /*******************************************************************/
58 /* forward references */
59 status_t c4_chan_work_init(mpi_t *, mch_t *);
60 void musycc_wq_chan_restart(void *);
61 status_t __init c4_init(ci_t *, u_char *, u_char *);
62 status_t __init c4_init2(ci_t *);
63 ci_t *__init c4_new(void *);
64 int __init c4hw_attach_all(void);
65 void __init hdw_sn_get(hdw_info_t *, int);
66
67 #ifdef CONFIG_SBE_PMCC4_NCOMM
68 irqreturn_t c4_ebus_intr_th_handler(void *);
69
70 #endif
71 int c4_frame_rw(ci_t *, struct sbecom_port_param *);
72 status_t c4_get_port(ci_t *, int);
73 int c4_loop_port(ci_t *, int, u_int8_t);
74 int c4_musycc_rw(ci_t *, struct c4_musycc_param *);
75 int c4_new_chan(ci_t *, int, int, void *);
76 status_t c4_set_port(ci_t *, int);
77 int c4_pld_rw(ci_t *, struct sbecom_port_param *);
78 void cleanup_devs(void);
79 void cleanup_ioremap(void);
80 status_t musycc_chan_down(ci_t *, int);
81 irqreturn_t musycc_intr_th_handler(void *);
82 int musycc_start_xmit(ci_t *, int, void *);
83
84 extern ci_t *CI;
85 extern struct s_hdw_info hdw_info[];
86
87 #if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
88 defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
89 #define _v7_hdlc_ 1
90 #else
91 #define _v7_hdlc_ 0
92 #endif
93
94 #if _v7_hdlc_
95 #define V7(x) (x ## _v7)
96 extern int hdlc_netif_rx_v7(hdlc_device *, struct sk_buff *);
97 extern int register_hdlc_device_v7(hdlc_device *);
98 extern int unregister_hdlc_device_v7(hdlc_device *);
99
100 #else
101 #define V7(x) x
102 #endif
103
104 int error_flag; /* module load error reporting */
105 int cxt1e1_log_level = LOG_ERROR;
106 int log_level_default = LOG_ERROR;
107 module_param(cxt1e1_log_level, int, 0444);
108
109 int cxt1e1_max_mru = MUSYCC_MRU;
110 int max_mru_default = MUSYCC_MRU;
111 module_param(cxt1e1_max_mru, int, 0444);
112
113 int cxt1e1_max_mtu = MUSYCC_MTU;
114 int max_mtu_default = MUSYCC_MTU;
115 module_param(cxt1e1_max_mtu, int, 0444);
116
117 int max_txdesc_used = MUSYCC_TXDESC_MIN;
118 int max_txdesc_default = MUSYCC_TXDESC_MIN;
119 module_param(max_txdesc_used, int, 0444);
120
121 int max_rxdesc_used = MUSYCC_RXDESC_MIN;
122 int max_rxdesc_default = MUSYCC_RXDESC_MIN;
123 module_param(max_rxdesc_used, int, 0444);
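/*
 * Illustrative usage, assuming the module builds as cxt1e1.ko: the
 * read-only (0444) parameters above may still be overridden at load
 * time, e.g. "modprobe cxt1e1 cxt1e1_log_level=2 cxt1e1_max_mru=2048"
 * (the values shown are examples only).
 */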
124
125 /****************************************************************************/
126 /****************************************************************************/
127 /****************************************************************************/
128
129 void *
130 getuserbychan(int channum)
131 {
132 mch_t *ch;
133
134 ch = c4_find_chan(channum);
135 return ch ? ch->user : NULL;
136 }
137
138
139 char *
140 get_hdlc_name(hdlc_device *hdlc)
141 {
142 struct c4_priv *priv = hdlc->priv;
143 struct net_device *dev = getuserbychan(priv->channum);
144
145 return dev->name;
146 }
147
148
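/*
 * Convert a positive, BSD-style error code returned by the lower layers
 * into a negative Linux errno; zero and negative values pass through
 * unchanged.
 */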
149 static status_t
150 mkret(int bsd)
151 {
152 if (bsd > 0)
153 return -bsd;
154 else
155 return bsd;
156 }
157
158 /***************************************************************************/
159 #include <linux/workqueue.h>
160
161 /***
162 * One workqueue (wq) per port (since musycc allows simultaneous group
163 * commands), with individual work data for each channel:
164 *
165 * mpi_t -> struct workqueue_struct *wq_port; (dynamically allocated via
166 * create_singlethread_workqueue() in c4_wq_port_init())
167 *
168 * with a work structure statically embedded in each channel:
169 *
170 * mch_t -> struct work_struct ch_work; (initialized via INIT_WORK())
171 *
172 ***/
173
174
175 /*
176 * Called by the start transmit routine when a channel TX_ENABLE is to be
177 * issued. This queues the transmission start request among other channels
178 * within a port's group.
179 */
180 void
181 c4_wk_chan_restart(mch_t *ch)
182 {
183 mpi_t *pi = ch->up;
184
185 #ifdef RLD_RESTART_DEBUG
186 pr_info(">> %s: queueing Port %d Chan %d, mch_t @ %p\n",
187 __func__, pi->portnum, ch->channum, ch);
188 #endif
189
190 /* queue this channel's work item on the port's workqueue */
191
192 /** queue_work(struct workqueue_struct *queue,
193 ** struct work_struct *work);
194 **/
195 queue_work(pi->wq_port, &ch->ch_work);
196 }
197
198 status_t
199 c4_wk_chan_init(mpi_t *pi, mch_t *ch)
200 {
201 /*
202 * this will be used to restart a stopped channel
203 */
204
205 /** INIT_WORK(struct work_struct *work,
206 ** work_func_t func);
207 ** where work_func_t is void (*)(struct work_struct *)
208 **/
209 INIT_WORK(&ch->ch_work, (void *)musycc_wq_chan_restart);
210 return 0; /* success */
211 }
212
213 status_t
214 c4_wq_port_init(mpi_t *pi)
215 {
216
217 char name[16]; /* NOTE: name of the queue limited by system
218 * to 10 characters */
219 if (pi->wq_port)
220 return 0; /* already initialized */
221
222 /* e.g. "pmcc4-01" */
223 snprintf(name, sizeof(name), "%s%d", pi->up->devname, pi->portnum);
224
225 #ifdef RLD_RESTART_DEBUG
226 pr_info(">> %s: creating workqueue <%s> for Port %d.\n",
227 __func__, name, pi->portnum); /* RLD DEBUG */
228 #endif
229 pi->wq_port = create_singlethread_workqueue(name);
230 if (!pi->wq_port)
231 return -ENOMEM;
232 return 0; /* success */
233 }
234
235 void
236 c4_wq_port_cleanup(mpi_t *pi)
237 {
238 /*
239 * PORT POINT: cannot call this if the WQ were statically allocated
240 * within a structure, since destroy_workqueue() frees the workqueue
241 */
242 if (pi->wq_port) {
243 destroy_workqueue(pi->wq_port); /* this also calls
244 * flush_workqueue() */
245 pi->wq_port = NULL;
246 }
247 }
248
249 /***************************************************************************/
250
251 irqreturn_t
252 c4_linux_interrupt(int irq, void *dev_instance)
253 {
254 struct net_device *ndev = dev_instance;
255
256 return musycc_intr_th_handler(netdev_priv(ndev));
257 }
258
259
260 #ifdef CONFIG_SBE_PMCC4_NCOMM
261 irqreturn_t
262 c4_ebus_interrupt(int irq, void *dev_instance)
263 {
264 struct net_device *ndev = dev_instance;
265
266 return c4_ebus_intr_th_handler(netdev_priv(ndev));
267 }
268 #endif
269
270
271 static int
272 void_open(struct net_device *ndev)
273 {
274 pr_info("%s: trying to open master device !\n", ndev->name);
275 return -1;
276 }
277
278
279 static int
280 chan_open(struct net_device *ndev)
281 {
282 hdlc_device *hdlc = dev_to_hdlc(ndev);
283 const struct c4_priv *priv = hdlc->priv;
284 int ret;
285
286 ret = hdlc_open(ndev);
287 if (ret) {
288 pr_info("hdlc_open failure, err %d.\n", ret);
289 return ret;
290 }
291
292 ret = c4_chan_up(priv->ci, priv->channum);
293 if (ret)
294 return -ret;
295 try_module_get(THIS_MODULE);
296 netif_start_queue(ndev);
297 return 0; /* no error = success */
298 }
299
300
301 static int
302 chan_close(struct net_device *ndev)
303 {
304 hdlc_device *hdlc = dev_to_hdlc(ndev);
305 const struct c4_priv *priv = hdlc->priv;
306
307 netif_stop_queue(ndev);
308 musycc_chan_down((ci_t *) 0, priv->channum);
309 hdlc_close(ndev);
310 module_put(THIS_MODULE);
311 return 0;
312 }
313
314
315 static int
316 chan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
317 {
318 return hdlc_ioctl(dev, ifr, cmd);
319 }
320
321
322 static int
323 chan_attach_noop(struct net_device *ndev, unsigned short foo_1,
324 unsigned short foo_2)
325 {
326 /* nothing for the driver to do here: encoding and parity are
327 * configured elsewhere by a separate configuration application
328 */
329 return 0;
330 }
331
332
333 static struct net_device_stats *
334 chan_get_stats(struct net_device *ndev)
335 {
336 mch_t *ch;
337 struct net_device_stats *nstats;
338 struct sbecom_chan_stats *stats;
339 int channum;
340
341 {
342 struct c4_priv *priv;
343
344 priv = (struct c4_priv *)dev_to_hdlc(ndev)->priv;
345 channum = priv->channum;
346 }
347
348 ch = c4_find_chan(channum);
349 if (ch == NULL)
350 return NULL;
351
352 nstats = &ndev->stats;
353 stats = &ch->s;
354
355 memset(nstats, 0, sizeof(struct net_device_stats));
356 nstats->rx_packets = stats->rx_packets;
357 nstats->tx_packets = stats->tx_packets;
358 nstats->rx_bytes = stats->rx_bytes;
359 nstats->tx_bytes = stats->tx_bytes;
360 nstats->rx_errors = stats->rx_length_errors +
361 stats->rx_over_errors +
362 stats->rx_crc_errors +
363 stats->rx_frame_errors +
364 stats->rx_fifo_errors +
365 stats->rx_missed_errors;
366 nstats->tx_errors = stats->tx_dropped +
367 stats->tx_aborted_errors +
368 stats->tx_fifo_errors;
369 nstats->rx_dropped = stats->rx_dropped;
370 nstats->tx_dropped = stats->tx_dropped;
371
372 nstats->rx_length_errors = stats->rx_length_errors;
373 nstats->rx_over_errors = stats->rx_over_errors;
374 nstats->rx_crc_errors = stats->rx_crc_errors;
375 nstats->rx_frame_errors = stats->rx_frame_errors;
376 nstats->rx_fifo_errors = stats->rx_fifo_errors;
377 nstats->rx_missed_errors = stats->rx_missed_errors;
378
379 nstats->tx_aborted_errors = stats->tx_aborted_errors;
380 nstats->tx_fifo_errors = stats->tx_fifo_errors;
381
382 return nstats;
383 }
384
385
386 static ci_t *
387 get_ci_by_dev(struct net_device *ndev)
388 {
389 return (ci_t *)(netdev_priv(ndev));
390 }
391
392
393 static int
394 c4_linux_xmit(struct sk_buff *skb, struct net_device *ndev)
395 {
396 const struct c4_priv *priv;
397 int rval;
398
399 hdlc_device *hdlc = dev_to_hdlc(ndev);
400
401 priv = hdlc->priv;
402
403 rval = musycc_start_xmit(priv->ci, priv->channum, skb);
404 return rval;
405 }
406
407 static const struct net_device_ops chan_ops = {
408 .ndo_open = chan_open,
409 .ndo_stop = chan_close,
410 .ndo_start_xmit = c4_linux_xmit,
411 .ndo_do_ioctl = chan_dev_ioctl,
412 .ndo_get_stats = chan_get_stats,
413 };
414
415 static struct net_device *
416 create_chan(struct net_device *ndev, ci_t *ci,
417 struct sbecom_chan_param *cp)
418 {
419 hdlc_device *hdlc;
420 struct net_device *dev;
421 hdw_info_t *hi;
422 int ret;
423
424 if (c4_find_chan(cp->channum))
425 return NULL; /* channel already exists */
426
427 {
428 struct c4_priv *priv;
429
430 /* allocate then fill in private data structure */
431 priv = OS_kmalloc(sizeof(struct c4_priv));
432 if (!priv) {
433 pr_warning("%s: no memory for net_device !\n",
434 ci->devname);
435 return NULL;
436 }
437 dev = alloc_hdlcdev(priv);
438 if (!dev) {
439 pr_warning("%s: no memory for hdlc_device !\n",
440 ci->devname);
441 OS_kfree(priv);
442 return NULL;
443 }
444 priv->ci = ci;
445 priv->channum = cp->channum;
446 }
447
448 hdlc = dev_to_hdlc(dev);
449
450 dev->base_addr = 0; /* not I/O mapped */
451 dev->irq = ndev->irq;
452 dev->type = ARPHRD_RAWHDLC;
453 *dev->name = 0; /* default ifconfig name = "hdlc" */
454
455 hi = (hdw_info_t *)ci->hdw_info;
456 if (hi->mfg_info_sts == EEPROM_OK) {
457 switch (hi->promfmt) {
458 case PROM_FORMAT_TYPE1:
459 memcpy(dev->dev_addr,
460 (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
461 break;
462 case PROM_FORMAT_TYPE2:
463 memcpy(dev->dev_addr,
464 (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
465 break;
466 default:
467 memset(dev->dev_addr, 0, 6);
468 break;
469 }
470 } else
471 memset(dev->dev_addr, 0, 6);
472
473 hdlc->xmit = c4_linux_xmit;
474
475 dev->netdev_ops = &chan_ops;
476 /*
477 * The native hdlc stack calls this 'attach' routine during
478 * hdlc_raw_ioctl(), passing parameters for line encoding and parity.
479 * Since the hdlc_raw_ioctl() path does not check whether an 'attach'
480 * routine is actually registered, we supply a dummy routine that
481 * does nothing (encoding and parity are set up for this driver by a
482 * separate configuration application).
483 */
484
485 hdlc->attach = chan_attach_noop;
486
487 /* needed due to Ioctl calling sequence */
488 rtnl_unlock();
489 ret = register_hdlc_device(dev);
490 /* NOTE: the tx_queue_len setting below must occur AFTER registration in order to take effect */
491 dev->tx_queue_len = MAX_DEFAULT_IFQLEN;
492
493 /* needed due to Ioctl calling sequence */
494 rtnl_lock();
495 if (ret) {
496 if (cxt1e1_log_level >= LOG_WARN)
497 pr_info("%s: create_chan[%d] registration error = %d.\n",
498 ci->devname, cp->channum, ret);
499 /* cleanup */
500 free_netdev(dev);
501 /* failed to register */
502 return NULL;
503 }
504 return dev;
505 }
506
507
508 /* retrieve port information and copy it back to user space */
509 static status_t
510 do_get_port(struct net_device *ndev, void *data)
511 {
512 int ret;
513 ci_t *ci; /* ci stands for card information */
514 struct sbecom_port_param pp;/* copy data to kernel land */
515
516 if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
517 return -EFAULT;
518 if (pp.portnum >= MUSYCC_NPORTS)
519 return -EFAULT;
520 ci = get_ci_by_dev(ndev);
521 if (!ci)
522 return -EINVAL; /* get card info */
523
524 ret = mkret(c4_get_port(ci, pp.portnum));
525 if (ret)
526 return ret;
527 if (copy_to_user(data, &ci->port[pp.portnum].p,
528 sizeof(struct sbecom_port_param)))
529 return -EFAULT;
530 return 0;
531 }
532
533 /* this function copies the user data and then calls the real action function */
534 static status_t
535 do_set_port(struct net_device *ndev, void *data)
536 {
537 ci_t *ci; /* ci stands for card information */
538 struct sbecom_port_param pp;/* copy data to kernel land */
539
540 if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
541 return -EFAULT;
542 if (pp.portnum >= MUSYCC_NPORTS)
543 return -EFAULT;
544 ci = get_ci_by_dev(ndev);
545 if (!ci)
546 return -EINVAL; /* get card info */
547
548 if (pp.portnum >= ci->max_port) /* sanity check */
549 return -ENXIO;
550
551 memcpy(&ci->port[pp.portnum].p, &pp, sizeof(struct sbecom_port_param));
552 return mkret(c4_set_port(ci, pp.portnum));
553 }
554
555 /* set the port loopback mode as directed */
556 static status_t
557 do_port_loop(struct net_device *ndev, void *data)
558 {
559 struct sbecom_port_param pp;
560 ci_t *ci;
561
562 if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
563 return -EFAULT;
564 ci = get_ci_by_dev(ndev);
565 if (!ci)
566 return -EINVAL;
567 return mkret(c4_loop_port(ci, pp.portnum, pp.port_mode));
568 }
569
570 /* write the specified framer register with the given value, or just read it */
571 static status_t
572 do_framer_rw(struct net_device *ndev, void *data)
573 {
574 struct sbecom_port_param pp;
575 ci_t *ci;
576 int ret;
577
578 if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
579 return -EFAULT;
580 ci = get_ci_by_dev(ndev);
581 if (!ci)
582 return -EINVAL;
583 ret = mkret(c4_frame_rw(ci, &pp));
584 if (ret)
585 return ret;
586 if (copy_to_user(data, &pp, sizeof(struct sbecom_port_param)))
587 return -EFAULT;
588 return 0;
589 }
590
591 /* write the specified PLD register with the given value, or just read it */
592 static status_t
593 do_pld_rw(struct net_device *ndev, void *data)
594 {
595 struct sbecom_port_param pp;
596 ci_t *ci;
597 int ret;
598
599 if (copy_from_user(&pp, data, sizeof(struct sbecom_port_param)))
600 return -EFAULT;
601 ci = get_ci_by_dev(ndev);
602 if (!ci)
603 return -EINVAL;
604 ret = mkret(c4_pld_rw(ci, &pp));
605 if (ret)
606 return ret;
607 if (copy_to_user(data, &pp, sizeof(struct sbecom_port_param)))
608 return -EFAULT;
609 return 0;
610 }
611
612 /* write the specified musycc register with the given value, or just read it */
613 static status_t
614 do_musycc_rw(struct net_device *ndev, void *data)
615 {
616 struct c4_musycc_param mp;
617 ci_t *ci;
618 int ret;
619
620 if (copy_from_user(&mp, data, sizeof(struct c4_musycc_param)))
621 return -EFAULT;
622 ci = get_ci_by_dev(ndev);
623 if (!ci)
624 return -EINVAL;
625 ret = mkret(c4_musycc_rw(ci, &mp));
626 if (ret)
627 return ret;
628 if (copy_to_user(data, &mp, sizeof(struct c4_musycc_param)))
629 return -EFAULT;
630 return 0;
631 }
632
633 static status_t
634 do_get_chan(struct net_device *ndev, void *data)
635 {
636 struct sbecom_chan_param cp;
637 int ret;
638
639 if (copy_from_user(&cp, data,
640 sizeof(struct sbecom_chan_param)))
641 return -EFAULT;
642
643 ret = mkret(c4_get_chan(cp.channum, &cp));
644 if (ret)
645 return ret;
646
647 if (copy_to_user(data, &cp, sizeof(struct sbecom_chan_param)))
648 return -EFAULT;
649 return 0;
650 }
651
652 static status_t
653 do_set_chan(struct net_device *ndev, void *data)
654 {
655 struct sbecom_chan_param cp;
656 int ret;
657 ci_t *ci;
658
659 if (copy_from_user(&cp, data, sizeof(struct sbecom_chan_param)))
660 return -EFAULT;
661 ci = get_ci_by_dev(ndev);
662 if (!ci)
663 return -EINVAL;
664 switch (ret = mkret(c4_set_chan(cp.channum, &cp)))
665 {
666 case 0:
667 return 0;
668 default:
669 return ret;
670 }
671 }
672
673 static status_t
674 do_create_chan(struct net_device *ndev, void *data)
675 {
676 ci_t *ci;
677 struct net_device *dev;
678 struct sbecom_chan_param cp;
679 int ret;
680
681 if (copy_from_user(&cp, data, sizeof(struct sbecom_chan_param)))
682 return -EFAULT;
683 ci = get_ci_by_dev(ndev);
684 if (!ci)
685 return -EINVAL;
686 dev = create_chan(ndev, ci, &cp);
687 if (!dev)
688 return -EBUSY;
689 ret = mkret(c4_new_chan(ci, cp.port, cp.channum, dev));
690 if (ret) {
691 /* needed due to Ioctl calling sequence */
692 rtnl_unlock();
693 unregister_hdlc_device(dev);
694 /* needed due to Ioctl calling sequence */
695 rtnl_lock();
696 free_netdev(dev);
697 }
698 return ret;
699 }
700
701 static status_t
702 do_get_chan_stats(struct net_device *ndev, void *data)
703 {
704 struct c4_chan_stats_wrap ccs;
705 int ret;
706
707 if (copy_from_user(&ccs, data,
708 sizeof(struct c4_chan_stats_wrap)))
709 return -EFAULT;
710 switch (ret = mkret(c4_get_chan_stats(ccs.channum, &ccs.stats)))
711 {
712 case 0:
713 break;
714 default:
715 return ret;
716 }
717 if (copy_to_user(data, &ccs,
718 sizeof(struct c4_chan_stats_wrap)))
719 return -EFAULT;
720 return 0;
721 }
722 static status_t
723 do_set_loglevel(struct net_device *ndev, void *data)
724 {
725 unsigned int new_level; /* local copy; do not shadow the module parameter */
726
727 if (copy_from_user(&new_level, data, sizeof(int)))
728 return -EFAULT;
729 sbecom_set_loglevel(new_level);
730 return 0;
731 }
732
733 static status_t
734 do_deluser(struct net_device *ndev, int lockit)
735 {
736 if (ndev->flags & IFF_UP)
737 return -EBUSY;
738
739 {
740 ci_t *ci;
741 mch_t *ch;
742 const struct c4_priv *priv;
743 int channum;
744
745 priv = (struct c4_priv *)dev_to_hdlc(ndev)->priv;
746 ci = priv->ci;
747 channum = priv->channum;
748
749 ch = c4_find_chan(channum);
750 if (ch == NULL)
751 return -ENOENT;
752 ch->user = NULL; /* will be freed, below */
753 }
754
755 /* needed if called from the ioctl path */
756 if (lockit)
757 rtnl_unlock();
758 unregister_hdlc_device(ndev);
759 /* needed if called from the ioctl path */
760 if (lockit)
761 rtnl_lock();
762 free_netdev(ndev);
763 return 0;
764 }
765
766 int
767 do_del_chan(struct net_device *musycc_dev, void *data)
768 {
769 struct sbecom_chan_param cp;
770 char buf[sizeof(CHANNAME) + 3];
771 struct net_device *dev;
772 int ret;
773
774 if (copy_from_user(&cp, data,
775 sizeof(struct sbecom_chan_param)))
776 return -EFAULT;
777 if (cp.channum > 999)
778 return -EINVAL;
779 snprintf(buf, sizeof(buf), CHANNAME "%d", cp.channum);
780 dev = __dev_get_by_name(&init_net, buf);
781 if (!dev)
782 return -ENODEV;
783 ret = do_deluser(dev, 1);
784 if (ret)
785 return ret;
786 return c4_del_chan(cp.channum);
787 }
788 int c4_reset_board(void *);
789
790 int
791 do_reset(struct net_device *musycc_dev, void *data)
792 {
793 const struct c4_priv *priv;
794 int i;
795
796 for (i = 0; i < 128; i++) {
797 struct net_device *ndev;
798 char buf[sizeof(CHANNAME) + 3];
799
800 snprintf(buf, sizeof(buf), CHANNAME "%d", i);
801 ndev = __dev_get_by_name(&init_net, buf);
802 if (!ndev)
803 continue;
804 priv = dev_to_hdlc(ndev)->priv;
805
806 if ((unsigned long) (priv->ci) ==
807 (unsigned long) (netdev_priv(musycc_dev))) {
808 ndev->flags &= ~IFF_UP;
809 netif_stop_queue(ndev);
810 do_deluser(ndev, 1);
811 }
812 }
813 return 0;
814 }
815
816 int
817 do_reset_chan_stats(struct net_device *musycc_dev, void *data)
818 {
819 struct sbecom_chan_param cp;
820
821 if (copy_from_user(&cp, data,
822 sizeof(struct sbecom_chan_param)))
823 return -EFAULT;
824 return mkret(c4_del_chan_stats(cp.channum));
825 }
826
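/*
 * Multiplexed private-ioctl entry point: user space issues
 * SIOCDEVPRIVATE + 15 with an SBE sub-command word at the start of
 * ifr_data; the command-specific payload that follows is copied into
 * the scratch union below and dispatched to the matching do_*() helper.
 */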
827 static status_t
828 c4_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
829 {
830 ci_t *ci;
831 void *data;
832 int iocmd, iolen;
833 status_t ret;
834 static struct data {
835 union {
836 u_int8_t c;
837 u_int32_t i;
838 struct sbe_brd_info bip;
839 struct sbe_drv_info dip;
840 struct sbe_iid_info iip;
841 struct sbe_brd_addr bap;
842 struct sbecom_chan_stats stats;
843 struct sbecom_chan_param param;
844 struct temux_card_stats cards;
845 struct sbecom_card_param cardp;
846 struct sbecom_framer_param frp;
847 } u;
848 } arg;
849
850
851 if (!capable(CAP_SYS_ADMIN))
852 return -EPERM;
853 if (cmd != SIOCDEVPRIVATE + 15)
854 return -EINVAL;
855 ci = get_ci_by_dev(ndev);
856 if (!ci)
857 return -EINVAL;
858 if (ci->state != C_RUNNING)
859 return -ENODEV;
860 if (copy_from_user(&iocmd, ifr->ifr_data, sizeof(iocmd)))
861 return -EFAULT;
862 #if 0
863 if (copy_from_user(&len, ifr->ifr_data + sizeof(iocmd), sizeof(len)))
864 return -EFAULT;
865 #endif
866
867 #if 0
868 pr_info("c4_ioctl: iocmd %x, dir %x type %x nr %x iolen %d.\n", iocmd,
869 _IOC_DIR(iocmd), _IOC_TYPE(iocmd), _IOC_NR(iocmd),
870 _IOC_SIZE(iocmd));
871 #endif
872 iolen = _IOC_SIZE(iocmd);
873 if (iolen > sizeof(arg))
874 return -EFAULT;
875 data = ifr->ifr_data + sizeof(iocmd);
876 if (copy_from_user(&arg, data, iolen))
877 return -EFAULT;
878
879 ret = 0;
880 switch (iocmd) {
881 case SBE_IOC_PORT_GET:
882 ret = do_get_port(ndev, data);
883 break;
884 case SBE_IOC_PORT_SET:
885 ret = do_set_port(ndev, data);
886 break;
887 case SBE_IOC_CHAN_GET:
888 ret = do_get_chan(ndev, data);
889 break;
890 case SBE_IOC_CHAN_SET:
891 ret = do_set_chan(ndev, data);
892 break;
893 case C4_DEL_CHAN:
894 ret = do_del_chan(ndev, data);
895 break;
896 case SBE_IOC_CHAN_NEW:
897 ret = do_create_chan(ndev, data);
898 break;
899 case SBE_IOC_CHAN_GET_STAT:
900 ret = do_get_chan_stats(ndev, data);
901 break;
902 case SBE_IOC_LOGLEVEL:
903 ret = do_set_loglevel(ndev, data);
904 break;
905 case SBE_IOC_RESET_DEV:
906 ret = do_reset(ndev, data);
907 break;
908 case SBE_IOC_CHAN_DEL_STAT:
909 ret = do_reset_chan_stats(ndev, data);
910 break;
911 case C4_LOOP_PORT:
912 ret = do_port_loop(ndev, data);
913 break;
914 case C4_RW_FRMR:
915 ret = do_framer_rw(ndev, data);
916 break;
917 case C4_RW_MSYC:
918 ret = do_musycc_rw(ndev, data);
919 break;
920 case C4_RW_PLD:
921 ret = do_pld_rw(ndev, data);
922 break;
923 case SBE_IOC_IID_GET:
924 ret = (iolen == sizeof(struct sbe_iid_info)) ?
925 c4_get_iidinfo(ci, &arg.u.iip) : -EFAULT;
926 if (ret == 0) /* no error, copy data */
927 if (copy_to_user(data, &arg, iolen))
928 return -EFAULT;
929 break;
930 default:
931 ret = -EINVAL;
932 break;
933 }
934 return mkret(ret);
935 }
936
937 static const struct net_device_ops c4_ops = {
938 .ndo_open = void_open,
939 .ndo_start_xmit = c4_linux_xmit,
940 .ndo_do_ioctl = c4_ioctl,
941 };
942
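/*
 * Setup callback handed to alloc_netdev() for the board-level (master)
 * pseudo device: no ARP hardware type, board-level ops only.
 */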
943 static void c4_setup(struct net_device *dev)
944 {
945 dev->type = ARPHRD_VOID;
946 dev->netdev_ops = &c4_ops;
947 }
948
949 struct net_device *__init
950 c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
951 int irq0, int irq1)
952 {
953 struct net_device *ndev;
954 ci_t *ci;
955
956 ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
957 if (!ndev) {
958 pr_warning("%s: no memory for struct net_device !\n",
959 hi->devname);
960 error_flag = ENOMEM;
961 return NULL;
962 }
963 ci = (ci_t *)(netdev_priv(ndev));
964 ndev->irq = irq0;
965
966 ci->hdw_info = hi;
967 ci->state = C_INIT; /* mark as hardware not available */
968 ci->next = c4_list;
969 c4_list = ci;
970 ci->brdno = ci->next ? ci->next->brdno + 1 : 0;
971
972 if (!CI)
973 CI = ci; /* DEBUG, only board 0 usage */
974
975 strcpy(ci->devname, hi->devname);
976
977 /* tasklet */
978 #if defined(SBE_ISR_TASKLET)
979 tasklet_init(&ci->ci_musycc_isr_tasklet,
980 (void (*) (unsigned long)) musycc_intr_bh_tasklet,
981 (unsigned long) ci);
982
983 if (atomic_read(&ci->ci_musycc_isr_tasklet.count) == 0)
984 tasklet_disable_nosync(&ci->ci_musycc_isr_tasklet);
985 #elif defined(SBE_ISR_IMMEDIATE)
986 ci->ci_musycc_isr_tq.routine = (void *)(unsigned long)musycc_intr_bh_tasklet;
987 ci->ci_musycc_isr_tq.data = ci;
988 #endif
989
990
991 if (register_netdev(ndev) ||
992 (c4_init(ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS)) {
993 OS_kfree(netdev_priv(ndev));
994 OS_kfree(ndev);
995 error_flag = ENODEV;
996 return NULL;
997 }
998 /*************************************************************
999 * int request_irq(unsigned int irq,
1000 * irqreturn_t (*handler)(int irq, void *dev_id),
1001 * unsigned long flags, const char *dev_name, void *dev_id);
1002 * wherein:
1003 * irq -> The interrupt number that is being requested.
1004 * handler -> Pointer to handling function being installed.
1005 * flags -> A bit mask of options related to interrupt management.
1006 * dev_name -> String used in /proc/interrupts to show owner of interrupt.
1007 * dev_id -> Pointer (for shared interrupt lines) to point to its own
1008 * private data area (to identify which device is interrupting).
1009 *
1010 * extern void free_irq(unsigned int irq, void *dev_id);
1011 **************************************************************/
1012
1013 if (request_irq(irq0, &c4_linux_interrupt,
1014 IRQF_SHARED,
1015 ndev->name, ndev)) {
1016 pr_warning("%s: MUSYCC could not get irq: %d\n",
1017 ndev->name, irq0);
1018 unregister_netdev(ndev);
1019 OS_kfree(netdev_priv(ndev));
1020 OS_kfree(ndev);
1021 error_flag = EIO;
1022 return NULL;
1023 }
1024 #ifdef CONFIG_SBE_PMCC4_NCOMM
1025 if (request_irq(irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev)) {
1026 pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
1027 unregister_netdev(ndev);
1028 free_irq(irq0, ndev);
1029 OS_kfree(netdev_priv(ndev));
1030 OS_kfree(ndev);
1031 error_flag = EIO;
1032 return NULL;
1033 }
1034 #endif
1035
1036 /* setup board identification information */
1037
1038 {
1039 u_int32_t tmp;
1040
1041 /* also sets PROM format type (promfmt) for later usage */
1042 hdw_sn_get(hi, brdno);
1043
1044 switch (hi->promfmt) {
1045 case PROM_FORMAT_TYPE1:
1046 memcpy(ndev->dev_addr,
1047 (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
1048 /* unaligned data acquisition */
1049 memcpy(&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4);
1050 ci->brd_id = cpu_to_be32(tmp);
1051 break;
1052 case PROM_FORMAT_TYPE2:
1053 memcpy(ndev->dev_addr,
1054 (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
1055 /* unaligned data acquisition */
1056 memcpy(&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4);
1057 ci->brd_id = cpu_to_be32(tmp);
1058 break;
1059 default:
1060 ci->brd_id = 0;
1061 memset(ndev->dev_addr, 0, 6);
1062 break;
1063 }
1064
1065 #if 1
1066 /* requires bid to be preset */
1067 sbeid_set_hdwbid(ci);
1068 #else
1069 /* requires hdw_bid to be preset */
1070 sbeid_set_bdtype(ci);
1071 #endif
1072 }
1073
1074 #ifdef CONFIG_PROC_FS
1075 sbecom_proc_brd_init(ci);
1076 #endif
1077 #if defined(SBE_ISR_TASKLET)
1078 tasklet_enable(&ci->ci_musycc_isr_tasklet);
1079 #endif
1080
1081 error_flag = c4_init2(ci);
1082 if (error_flag != SBE_DRVR_SUCCESS) {
1083 #ifdef CONFIG_PROC_FS
1084 sbecom_proc_brd_cleanup(ci);
1085 #endif
1086 unregister_netdev(ndev);
1087 free_irq(irq1, ndev);
1088 free_irq(irq0, ndev);
1089 OS_kfree(netdev_priv(ndev));
1090 OS_kfree(ndev);
1091 /* failure, error_flag is set */
1092 return NULL;
1093 }
1094 return ndev;
1095 }
1096
1097 static int __init
1098 c4_mod_init(void)
1099 {
1100 int rtn;
1101
1102 rtn = c4hw_attach_all();
1103 if (rtn)
1104 return -rtn; /* installation failure - see system log */
1105
1106 /* housekeeping notifications */
1107 if (cxt1e1_log_level != log_level_default)
1108 pr_info("NOTE: driver parameter <cxt1e1_log_level> changed from default %d to %d.\n",
1109 log_level_default, cxt1e1_log_level);
1110 if (cxt1e1_max_mru != max_mru_default)
1111 pr_info("NOTE: driver parameter <cxt1e1_max_mru> changed from default %d to %d.\n",
1112 max_mru_default, cxt1e1_max_mru);
1113 if (cxt1e1_max_mtu != max_mtu_default)
1114 pr_info("NOTE: driver parameter <cxt1e1_max_mtu> changed from default %d to %d.\n",
1115 max_mtu_default, cxt1e1_max_mtu);
1116 if (max_rxdesc_used != max_rxdesc_default) {
1117 if (max_rxdesc_used > 2000)
1118 max_rxdesc_used = 2000; /* out-of-bounds reset */
1119 pr_info("NOTE: driver parameter <max_rxdesc_used> changed from default %d to %d.\n",
1120 max_rxdesc_default, max_rxdesc_used);
1121 }
1122 if (max_txdesc_used != max_txdesc_default) {
1123 if (max_txdesc_used > 1000)
1124 max_txdesc_used = 1000; /* out-of-bounds reset */
1125 pr_info("NOTE: driver parameter <max_txdesc_used> changed from default %d to %d.\n",
1126 max_txdesc_default, max_txdesc_used);
1127 }
1128 return 0; /* installation success */
1129 }
1130
1131
1132 /*
1133 * find any still allocated hdlc registrations and unregister via call to
1134 * do_deluser()
1135 */
1136
1137 static void __exit
1138 cleanup_hdlc(void)
1139 {
1140 hdw_info_t *hi;
1141 ci_t *ci;
1142 struct net_device *ndev;
1143 int i, j, k;
1144
1145 for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) {
1146 if (hi->ndev) { /* a board has been attached */
1147 ci = (ci_t *)(netdev_priv(hi->ndev));
1148 for (j = 0; j < ci->max_port; j++)
1149 for (k = 0; k < MUSYCC_NCHANS; k++) {
1150 ndev = ci->port[j].chan[k]->user;
1151 if (ndev)
1152 do_deluser(ndev, 0);
1153 }
1154 }
1155 }
1156 }
1157
1158
1159 static void __exit
1160 c4_mod_remove(void)
1161 {
1162 cleanup_hdlc(); /* delete any missed channels */
1163 cleanup_devs();
1164 c4_cleanup();
1165 cleanup_ioremap();
1166 pr_info("SBE - driver removed.\n");
1167 }
1168
1169 module_init(c4_mod_init);
1170 module_exit(c4_mod_remove);
1171
1172 MODULE_AUTHOR("SBE Technical Services <support@sbei.com>");
1173 MODULE_DESCRIPTION("wanPCI-CxT1E1 Generic HDLC WAN Driver module");
1174 #ifdef MODULE_LICENSE
1175 MODULE_LICENSE("GPL");
1176 #endif
1177
1178 /*** End-of-File ***/