1 /*
2 *
3 * Copyright 1999 Digi International (www.digi.com)
4 * James Puzzo <jamesp at digi dot com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
13 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
14 * PURPOSE. See the GNU General Public License for more details.
15 *
16 */
17
18 /*
19 *
20 * Filename:
21 *
22 * dgrp_net_ops.c
23 *
24 * Description:
25 *
26 * Handle the file operations required for the "network" devices.
27 * Includes those functions required to register the "net" devices
28 * in "/proc".
29 *
30 * Author:
31 *
32 * James A. Puzzo
33 *
34 */
35
36 #include <linux/module.h>
37 #include <linux/proc_fs.h>
38 #include <linux/slab.h>
39 #include <linux/string.h>
40 #include <linux/device.h>
41 #include <linux/tty.h>
42 #include <linux/tty_flip.h>
43 #include <linux/spinlock.h>
44 #include <linux/poll.h>
45 #include <linux/sched.h>
46 #include <linux/ratelimit.h>
47 #include <asm/unaligned.h>
48
49 #define MYFLIPLEN TBUF_MAX
50
51 #include "dgrp_common.h"
52
53 #define TTY_FLIPBUF_SIZE 512
54 #define DEVICE_NAME_SIZE 50
55
56 /*
57 * Generic helper function declarations
58 */
59 static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
60 unsigned char *fbuf, int *len);
61
62 /*
63 * File operation declarations
64 */
65 static int dgrp_net_open(struct inode *, struct file *);
66 static int dgrp_net_release(struct inode *, struct file *);
67 static ssize_t dgrp_net_read(struct file *, char __user *, size_t, loff_t *);
68 static ssize_t dgrp_net_write(struct file *, const char __user *, size_t,
69 loff_t *);
70 static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
71 unsigned long arg);
72 static unsigned int dgrp_net_select(struct file *file,
73 struct poll_table_struct *table);
74
75 const struct file_operations dgrp_net_ops = {
76 .owner = THIS_MODULE,
77 .read = dgrp_net_read,
78 .write = dgrp_net_write,
79 .poll = dgrp_net_select,
80 .unlocked_ioctl = dgrp_net_ioctl,
81 .open = dgrp_net_open,
82 .release = dgrp_net_release,
83 };
84
85 /**
86 * dgrp_dump() -- prints memory for debugging purposes.
87 * @mem: Memory location which should be printed to the console
88 * @len: Number of bytes to be dumped
89 */
90 static void dgrp_dump(u8 *mem, int len)
91 {
92 int i;
93
94 pr_debug("dgrp dump length = %d, data = ", len);
95 for (i = 0; i < len; ++i)
96 pr_debug("%.2x ", mem[i]);
97 pr_debug("\n");
98 }
99
100 /**
101 * dgrp_read_data_block() -- Read a data block
102 * @ch: struct ch_struct *
103 * @flipbuf: u8 *
104 * @flipbuf_size: size of flipbuf
105 */
106 static void dgrp_read_data_block(struct ch_struct *ch, u8 *flipbuf,
107 int flipbuf_size)
108 {
109 int t;
110 int n;
111
112 if (flipbuf_size <= 0)
113 return;
114
115 t = RBUF_MAX - ch->ch_rout;
116 n = flipbuf_size;
117
118 if (n >= t) {
119 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, t);
120 flipbuf += t;
121 n -= t;
122 ch->ch_rout = 0;
123 }
124
125 memcpy(flipbuf, ch->ch_rbuf + ch->ch_rout, n);
126 flipbuf += n;
127 ch->ch_rout += n;
128 }
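/*
 * Illustrative note (not part of the original source): ch_rbuf is a
 * circular buffer of RBUF_MAX bytes indexed by ch_rout.  Assuming a
 * hypothetical RBUF_MAX of 4096, ch_rout == 4090 and flipbuf_size == 10,
 * the first memcpy above drains the 6 bytes up to the end of the ring,
 * ch_rout wraps to 0, and the second memcpy copies the remaining 4 bytes
 * from the start of the ring.
 */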
129
130
131 /**
132 * dgrp_input() -- send data to the line discipline
133 * @ch: pointer to channel struct
134 *
135 * Copies the rbuf to the flipbuf and sends the input buffer data
136 * to the line discipline.
137 *
138 */
139 static void dgrp_input(struct ch_struct *ch)
140 {
141 struct nd_struct *nd;
142 struct tty_struct *tty;
143 int data_len;
144 int len;
145 int tty_count;
146 ulong lock_flags;
147 u8 *myflipbuf;
148 u8 *myflipflagbuf;
149
150 if (!ch)
151 return;
152
153 nd = ch->ch_nd;
154
155 if (!nd)
156 return;
157
158 spin_lock_irqsave(&nd->nd_lock, lock_flags);
159
160 myflipbuf = nd->nd_inputbuf;
161 myflipflagbuf = nd->nd_inputflagbuf;
162
163 if (!ch->ch_open_count) {
164 ch->ch_rout = ch->ch_rin;
165 goto out;
166 }
167
168 if (ch->ch_tun.un_flag & UN_CLOSING) {
169 ch->ch_rout = ch->ch_rin;
170 goto out;
171 }
172
173 tty = (ch->ch_tun).un_tty;
174
175
176 if (!tty || tty->magic != TTY_MAGIC) {
177 ch->ch_rout = ch->ch_rin;
178 goto out;
179 }
180
181 tty_count = tty->count;
182 if (!tty_count) {
183 ch->ch_rout = ch->ch_rin;
184 goto out;
185 }
186
187 if (tty->closing || test_bit(TTY_CLOSING, &tty->flags)) {
188 ch->ch_rout = ch->ch_rin;
189 goto out;
190 }
191
192 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
193
194 /* data_len should be the number of chars that we read in */
195 data_len = (ch->ch_rin - ch->ch_rout) & RBUF_MASK;
196
197 /* len is the amount of data we are going to transfer here */
198 len = tty_buffer_request_room(&ch->port, data_len);
199
200 /* Check DPA flow control */
201 if ((nd->nd_dpa_debug) &&
202 (nd->nd_dpa_flag & DPA_WAIT_SPACE) &&
203 (nd->nd_dpa_port == MINOR(tty_devnum(ch->ch_tun.un_tty))))
204 len = 0;
205
206 if ((len) && !(ch->ch_flag & CH_RXSTOP)) {
207
208 dgrp_read_data_block(ch, myflipbuf, len);
209
210 if (I_PARMRK(tty) || I_BRKINT(tty) || I_INPCK(tty))
211 parity_scan(ch, myflipbuf, myflipflagbuf, &len);
212 else
213 memset(myflipflagbuf, TTY_NORMAL, len);
214
215 if ((nd->nd_dpa_debug) &&
216 (nd->nd_dpa_port == PORT_NUM(MINOR(tty_devnum(tty)))))
217 dgrp_dpa_data(nd, 1, myflipbuf, len);
218
219 tty_insert_flip_string_flags(&ch->port, myflipbuf,
220 myflipflagbuf, len);
221 tty_flip_buffer_push(&ch->port);
222
223 ch->ch_rxcount += len;
224 }
225
226 /*
227 * Wake up any sleepers (maybe dgrp close) that might be waiting
228 * for a channel flag state change.
229 */
230 wake_up_interruptible(&ch->ch_flag_wait);
231 return;
232
233 out:
234 spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
235 }
236
237
238 /*
239 * parity_scan
240 *
241 * Loop to inspect each single character or 0xFF escape.
242 *
243 * if PARMRK & ~DOSMODE:
244 * 0xFF 0xFF Normal 0xFF character, escaped
245 * to eliminate confusion.
246 * 0xFF 0x00 0x00 Break
247 * 0xFF 0x00 CC Error character CC.
248 * CC Normal character CC.
249 *
250 * if PARMRK & DOSMODE:
251 * 0xFF 0x18 0x00 Break
252 * 0xFF 0x08 0x00 Framing Error
253 * 0xFF 0x04 0x00 Parity error
254 * 0xFF 0x0C 0x00 Both Framing and Parity error
255 *
256 * TODO: do we need to do the XMODEM, XOFF, XON, XANY processing??
257 * as per protocol
258 */
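/*
 * Worked example (illustrative, PARMRK set and DOSMODE clear): the escaped
 * input stream
 *
 *	0x41   0xFF 0xFF   0xFF 0x00 0x00   0x42
 *
 * is decoded by the state machine below into four characters for the line
 * discipline: 'A' (TTY_NORMAL), 0xFF (TTY_NORMAL), a break (TTY_BREAK) and
 * 'B' (TTY_NORMAL).
 */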
259 static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
260 unsigned char *fbuf, int *len)
261 {
262 int l = *len;
263 int count = 0;
264 int DOS = ((ch->ch_iflag & IF_DOSMODE) == 0 ? 0 : 1);
265 unsigned char *cout; /* character buffer */
266 unsigned char *fout; /* flag buffer */
267 unsigned char *in;
268 unsigned char c;
269
270 in = cbuf;
271 cout = cbuf;
272 fout = fbuf;
273
274 while (l--) {
275 c = *in;
276 in++;
277
278 switch (ch->ch_pscan_state) {
279 default:
280 /* reset to sanity and fall through */
281 ch->ch_pscan_state = 0;
282
283 case 0:
284 /* No FF seen yet */
285 if (c == 0xff) /* delete this character from stream */
286 ch->ch_pscan_state = 1;
287 else {
288 *cout++ = c;
289 *fout++ = TTY_NORMAL;
290 count += 1;
291 }
292 break;
293
294 case 1:
295 /* first FF seen */
296 if (c == 0xff) {
297 /* doubled ff, transform to single ff */
298 *cout++ = c;
299 *fout++ = TTY_NORMAL;
300 count += 1;
301 ch->ch_pscan_state = 0;
302 } else {
303 /* save value for examination in next state */
304 ch->ch_pscan_savechar = c;
305 ch->ch_pscan_state = 2;
306 }
307 break;
308
309 case 2:
310 /* third character of ff sequence */
311 *cout++ = c;
312 if (DOS) {
313 if (ch->ch_pscan_savechar & 0x10)
314 *fout++ = TTY_BREAK;
315 else if (ch->ch_pscan_savechar & 0x08)
316 *fout++ = TTY_FRAME;
317 else
318 /*
319 * either marked as a parity error,
320 * indeterminate, or not in DOSMODE
321 * call it a parity error
322 */
323 *fout++ = TTY_PARITY;
324 } else {
325 /* case FF XX ?? where XX is not 00 */
326 if (ch->ch_pscan_savechar & 0xff) {
327 /* this should not happen */
328 pr_info("%s: parity_scan: error unexpected byte\n",
329 __func__);
330 *fout++ = TTY_PARITY;
331 }
332 /* case FF 00 XX where XX is not 00 */
333 else if (c == 0xff)
334 *fout++ = TTY_PARITY;
335 /* case FF 00 00 */
336 else
337 *fout++ = TTY_BREAK;
338
339 }
340 count += 1;
341 ch->ch_pscan_state = 0;
342 }
343 }
344 *len = count;
345 }
346
347
348 /**
349 * dgrp_net_idle() -- Idle the network connection
350 * @nd: pointer to node structure to idle
351 */
352 static void dgrp_net_idle(struct nd_struct *nd)
353 {
354 struct ch_struct *ch;
355 int i;
356
357 nd->nd_tx_work = 1;
358
359 nd->nd_state = NS_IDLE;
360 nd->nd_flag = 0;
361
362 for (i = nd->nd_seq_out; ; i = (i + 1) & SEQ_MASK) {
363 if (nd->nd_seq_wait[i]) {
364 nd->nd_seq_wait[i] = 0;
365 wake_up_interruptible(&nd->nd_seq_wque[i]);
366 }
367
368 if (i == nd->nd_seq_in)
369 break;
370 }
371
372 nd->nd_seq_out = nd->nd_seq_in;
373
374 nd->nd_unack = 0;
375 nd->nd_remain = 0;
376
377 nd->nd_tx_module = 0x10;
378 nd->nd_rx_module = 0x00;
379
380 for (i = 0, ch = nd->nd_chan; i < CHAN_MAX; i++, ch++) {
381 ch->ch_state = CS_IDLE;
382
383 ch->ch_otype = 0;
384 ch->ch_otype_waiting = 0;
385 }
386 }
387
388 /*
389 * Increase the number of channels, waking up any
390 * threads that might be waiting for the channels
391 * to appear.
392 */
393 static void increase_channel_count(struct nd_struct *nd, int n)
394 {
395 struct ch_struct *ch;
396 struct device *classp;
397 char name[DEVICE_NAME_SIZE];
398 int ret;
399 u8 *buf;
400 int i;
401
402 for (i = nd->nd_chan_count; i < n; ++i) {
403 ch = nd->nd_chan + i;
404
405 /* FIXME: return a useful error instead! */
406 buf = kmalloc(TBUF_MAX, GFP_KERNEL);
407 if (!buf)
408 return;
409
410 if (ch->ch_tbuf)
411 pr_info_ratelimited("%s - ch_tbuf was not NULL\n",
412 __func__);
413
414 ch->ch_tbuf = buf;
415
416 buf = kmalloc(RBUF_MAX, GFP_KERNEL);
417 if (!buf)
418 return;
419
420 if (ch->ch_rbuf)
421 pr_info("%s - ch_rbuf was not NULL\n",
422 __func__);
423 ch->ch_rbuf = buf;
424
425 classp = tty_port_register_device(&ch->port,
426 nd->nd_serial_ttdriver, i,
427 NULL);
428
429 ch->ch_tun.un_sysfs = classp;
430 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
431
432 dgrp_create_tty_sysfs(&ch->ch_tun, classp);
433 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
434 &classp->kobj, name);
435
436 /* NOTE: We don't support "cu" devices anymore,
437 * so you will notice we don't register them
438 * here anymore. */
439 if (dgrp_register_prdevices) {
440 classp = tty_register_device(nd->nd_xprint_ttdriver,
441 i, NULL);
442 ch->ch_pun.un_sysfs = classp;
443 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
444
445 dgrp_create_tty_sysfs(&ch->ch_pun, classp);
446 ret = sysfs_create_link(&nd->nd_class_dev->kobj,
447 &classp->kobj, name);
448 }
449
450 nd->nd_chan_count = i + 1;
451 wake_up_interruptible(&ch->ch_flag_wait);
452 }
453 }
454
455 /*
456 * Decrease the number of channels, and wake up any threads that might
457 * be waiting on the channels that vanished.
458 */
459 static void decrease_channel_count(struct nd_struct *nd, int n)
460 {
461 struct ch_struct *ch;
462 char name[DEVICE_NAME_SIZE];
463 int i;
464
465 for (i = nd->nd_chan_count - 1; i >= n; --i) {
466 ch = nd->nd_chan + i;
467
468 /*
469 * Make any open ports inoperative.
470 */
471 ch->ch_state = CS_IDLE;
472
473 ch->ch_otype = 0;
474 ch->ch_otype_waiting = 0;
475
476 /*
477 * Only "HANGUP" if we care about carrier
478 * transitions and we are already open.
479 */
480 if (ch->ch_open_count != 0) {
481 ch->ch_flag |= CH_HANGUP;
482 dgrp_carrier(ch);
483 }
484
485 /*
486 * Unlike the CH_HANGUP flag above, use another
487 * flag to indicate to the RealPort state machine
488 * that this port has disappeared.
489 */
490 if (ch->ch_open_count != 0)
491 ch->ch_flag |= CH_PORT_GONE;
492
493 wake_up_interruptible(&ch->ch_flag_wait);
494
495 nd->nd_chan_count = i;
496
497 kfree(ch->ch_tbuf);
498 ch->ch_tbuf = NULL;
499
500 kfree(ch->ch_rbuf);
501 ch->ch_rbuf = NULL;
502
503 nd->nd_chan_count = i;
504
505 dgrp_remove_tty_sysfs(ch->ch_tun.un_sysfs);
506 snprintf(name, DEVICE_NAME_SIZE, "tty_%d", i);
507 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
508 tty_unregister_device(nd->nd_serial_ttdriver, i);
509
510 /*
511 * NOTE: We don't support "cu" devices anymore, so don't
512 * unregister them here anymore.
513 */
514
515 if (dgrp_register_prdevices) {
516 dgrp_remove_tty_sysfs(ch->ch_pun.un_sysfs);
517 snprintf(name, DEVICE_NAME_SIZE, "pr_%d", i);
518 sysfs_remove_link(&nd->nd_class_dev->kobj, name);
519 tty_unregister_device(nd->nd_xprint_ttdriver, i);
520 }
521 }
522 }
523
524 /**
525 * dgrp_chan_count() -- Adjust the node channel count.
526 * @nd: pointer to a node structure
527 * @n: new value for channel count
528 *
529 * Adjusts the node channel count. If new ports have appeared, it tries
530 * to signal those processes that might have been waiting for ports to
531 * appear. If ports have disappeared it tries to signal those processes
532 * that might be hung waiting for a response for the now non-existent port.
533 */
534 static void dgrp_chan_count(struct nd_struct *nd, int n)
535 {
536 if (n == nd->nd_chan_count)
537 return;
538
539 if (n > nd->nd_chan_count)
540 increase_channel_count(nd, n);
541
542 if (n < nd->nd_chan_count)
543 decrease_channel_count(nd, n);
544 }
545
546 /**
547 * dgrp_monitor() -- send data to the device monitor queue
548 * @nd: pointer to a node structure
549 * @buf: data to copy to the monitoring buffer
550 * @len: number of bytes to transfer to the buffer
551 *
552 * Called by the net device routines to send data to the device
553 * monitor queue. If the device monitor buffer is too full to
554 * accept the data, it waits until the buffer is ready.
555 */
556 static void dgrp_monitor(struct nd_struct *nd, u8 *buf, int len)
557 {
558 int n;
559 int r;
560 int rtn;
561
562 /*
563 * Grab monitor lock.
564 */
565 down(&nd->nd_mon_semaphore);
566
567 /*
568 * Loop while data remains.
569 */
570 while ((len > 0) && (nd->nd_mon_buf)) {
571 /*
572 * Determine the amount of available space left in the
573 * buffer. If there's none, wait until some appears.
574 */
575
576 n = (nd->nd_mon_out - nd->nd_mon_in - 1) & MON_MASK;
577
578 if (!n) {
579 nd->nd_mon_flag |= MON_WAIT_SPACE;
580
581 up(&nd->nd_mon_semaphore);
582
583 /*
584 * Go to sleep waiting until the condition becomes true.
585 */
586 rtn = wait_event_interruptible(nd->nd_mon_wqueue,
587 ((nd->nd_mon_flag & MON_WAIT_SPACE) == 0));
588
589 /* FIXME: really ignore rtn? */
590
591 /*
592 * We can't exit here if we receive a signal, since
593 * to do so would trash the debug stream.
594 */
595
596 down(&nd->nd_mon_semaphore);
597
598 continue;
599 }
600
601 /*
602 * Copy as much data as will fit.
603 */
604
605 if (n > len)
606 n = len;
607
608 r = MON_MAX - nd->nd_mon_in;
609
610 if (r <= n) {
611 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, r);
612
613 n -= r;
614
615 nd->nd_mon_in = 0;
616
617 buf += r;
618 len -= r;
619 }
620
621 memcpy(nd->nd_mon_buf + nd->nd_mon_in, buf, n);
622
623 nd->nd_mon_in += n;
624
625 buf += n;
626 len -= n;
627
628 if (nd->nd_mon_in >= MON_MAX)
629 pr_info_ratelimited("%s - nd_mon_in (%i) >= MON_MAX\n",
630 __func__, nd->nd_mon_in);
631
632 /*
633 * Wakeup any thread waiting for data
634 */
635
636 if (nd->nd_mon_flag & MON_WAIT_DATA) {
637 nd->nd_mon_flag &= ~MON_WAIT_DATA;
638 wake_up_interruptible(&nd->nd_mon_wqueue);
639 }
640 }
641
642 /*
643 * Release the monitor lock.
644 */
645 up(&nd->nd_mon_semaphore);
646 }
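/*
 * Worked example of the free-space computation in dgrp_monitor() above
 * (hypothetical sizes): with MON_MAX == 1024 (MON_MASK == 1023),
 * nd_mon_in == 1000 and nd_mon_out == 10, the free space is
 * n = (10 - 1000 - 1) & 1023 = 33 bytes, and a single copy is further
 * limited to r = 1024 - 1000 = 24 bytes before the input index wraps to 0.
 */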
647
648 /**
649 * dgrp_encode_time() -- Encodes rpdump time into a 4-byte quantity.
650 * @nd: pointer to a node structure
651 * @buf: destination buffer
652 *
653 * Encodes "rpdump" time into a 4-byte quantity. Time is measured since
654 * open.
655 */
656 static void dgrp_encode_time(struct nd_struct *nd, u8 *buf)
657 {
658 ulong t;
659
660 /*
661 * Convert time in HZ since open to time in milliseconds
662 * since open.
663 */
664 t = jiffies - nd->nd_mon_lbolt;
665 t = 1000 * (t / HZ) + 1000 * (t % HZ) / HZ;
666
667 put_unaligned_be32((uint)(t & 0xffffffff), buf);
668 }
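/*
 * Example of the conversion above (illustrative values): with HZ == 250 and
 * 625 jiffies elapsed since open, t = 1000 * (625 / 250) +
 * 1000 * (625 % 250) / 250 = 2000 + 500 = 2500 ms, which is then stored
 * big-endian in the 4-byte buffer.
 */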
669
670
671
672 /**
673 * dgrp_monitor_message() -- Builds an rpdump-style message.
674 * @nd: pointer to a node structure
675 * @message: destination buffer
676 */
677 static void dgrp_monitor_message(struct nd_struct *nd, char *message)
678 {
679 u8 header[7];
680 int n;
681
682 header[0] = RPDUMP_MESSAGE;
683
684 dgrp_encode_time(nd, header + 1);
685
686 n = strlen(message);
687
688 put_unaligned_be16(n, header + 5);
689
690 dgrp_monitor(nd, header, sizeof(header));
691 dgrp_monitor(nd, (u8 *) message, n);
692 }
693
694
695
696 /**
697 * dgrp_monitor_reset() -- Note a reset in the monitoring buffer.
698 * @nd: pointer to a node structure
699 */
700 static void dgrp_monitor_reset(struct nd_struct *nd)
701 {
702 u8 header[5];
703
704 header[0] = RPDUMP_RESET;
705
706 dgrp_encode_time(nd, header + 1);
707
708 dgrp_monitor(nd, header, sizeof(header));
709 }
710
711 /**
712 * dgrp_monitor_data() -- builds a monitor data packet
713 * @nd: pointer to a node structure
714 * @type: type of message to be logged
715 * @buf: data to be logged
716 * @size: number of bytes in the buffer
717 */
718 static void dgrp_monitor_data(struct nd_struct *nd, u8 type, u8 *buf, int size)
719 {
720 u8 header[7];
721
722 header[0] = type;
723
724 dgrp_encode_time(nd, header + 1);
725
726 put_unaligned_be16(size, header + 5);
727
728 dgrp_monitor(nd, header, sizeof(header));
729 dgrp_monitor(nd, buf, size);
730 }
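/*
 * Layout of the rpdump record emitted above, as built by the code (shown
 * here for reference only):
 *
 *	byte 0		record type (e.g. RPDUMP_CLIENT)
 *	bytes 1-4	big-endian milliseconds since open
 *	bytes 5-6	big-endian payload length
 *	bytes 7...	"size" bytes of payload
 */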
731
732 static int alloc_nd_buffers(struct nd_struct *nd)
733 {
734
735 nd->nd_iobuf = NULL;
736 nd->nd_writebuf = NULL;
737 nd->nd_inputbuf = NULL;
738 nd->nd_inputflagbuf = NULL;
739
740 /*
741 * Allocate the network read/write buffer.
742 */
743 nd->nd_iobuf = kzalloc(UIO_MAX + 10, GFP_KERNEL);
744 if (!nd->nd_iobuf)
745 goto out_err;
746
747 /*
748 * Allocate a buffer for doing the copy from user space to
749 * kernel space in the write routines.
750 */
751 nd->nd_writebuf = kzalloc(WRITEBUFLEN, GFP_KERNEL);
752 if (!nd->nd_writebuf)
753 goto out_err;
754
755 /*
756 * Allocate a buffer for doing the copy from kernel space to
757 * tty buffer space in the read routines.
758 */
759 nd->nd_inputbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
760 if (!nd->nd_inputbuf)
761 goto out_err;
762
763 /*
764 * Allocate a buffer for doing the copy from kernel space to
765 * tty buffer space in the read routines.
766 */
767 nd->nd_inputflagbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
768 if (!nd->nd_inputflagbuf)
769 goto out_err;
770
771 return 0;
772
773 out_err:
774 kfree(nd->nd_iobuf);
775 kfree(nd->nd_writebuf);
776 kfree(nd->nd_inputbuf);
777 kfree(nd->nd_inputflagbuf);
778 return -ENOMEM;
779 }
780
781 /*
782 * dgrp_net_open() -- Open the NET device for a particular PortServer
783 */
784 static int dgrp_net_open(struct inode *inode, struct file *file)
785 {
786 struct nd_struct *nd;
787 ulong lock_flags;
788 int rtn;
789
790 rtn = try_module_get(THIS_MODULE);
791 if (!rtn)
792 return -EAGAIN;
793
794 if (!capable(CAP_SYS_ADMIN)) {
795 rtn = -EPERM;
796 goto done;
797 }
798
799 /*
800 * Make sure that the "private_data" field hasn't already been used.
801 */
802 if (file->private_data) {
803 rtn = -EINVAL;
804 goto done;
805 }
806
807 /*
808 * Get the node pointer, and fail if it doesn't exist.
809 */
810 nd = PDE_DATA(inode);
811 if (!nd) {
812 rtn = -ENXIO;
813 goto done;
814 }
815
816 file->private_data = (void *) nd;
817
818 /*
819 * Grab the NET lock.
820 */
821 down(&nd->nd_net_semaphore);
822
823 if (nd->nd_state != NS_CLOSED) {
824 rtn = -EBUSY;
825 goto unlock;
826 }
827
828 /*
829 * Initialize the link speed parameters.
830 */
831
832 nd->nd_link.lk_fast_rate = UIO_MAX;
833 nd->nd_link.lk_slow_rate = UIO_MAX;
834
835 nd->nd_link.lk_fast_delay = 1000;
836 nd->nd_link.lk_slow_delay = 1000;
837
838 nd->nd_link.lk_header_size = 46;
839
840
841 rtn = alloc_nd_buffers(nd);
842 if (rtn)
843 goto unlock;
844
845 /*
846 * The port is now open, so move it to the IDLE state
847 */
848 dgrp_net_idle(nd);
849
850 nd->nd_tx_time = jiffies;
851
852 /*
853 * If the polling routine is not running, start it running here
854 */
855 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
856
857 if (!dgrp_poll_data.node_active_count) {
858 dgrp_poll_data.node_active_count = 2;
859 dgrp_poll_data.timer.expires = jiffies +
860 dgrp_poll_tick * HZ / 1000;
861 add_timer(&dgrp_poll_data.timer);
862 }
863
864 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
865
866 dgrp_monitor_message(nd, "Net Open");
867
868 unlock:
869 /*
870 * Release the NET lock.
871 */
872 up(&nd->nd_net_semaphore);
873
874 done:
875 if (rtn)
876 module_put(THIS_MODULE);
877
878 return rtn;
879 }
880
881 /* dgrp_net_release() -- close the NET device for a particular PortServer */
882 static int dgrp_net_release(struct inode *inode, struct file *file)
883 {
884 struct nd_struct *nd;
885 ulong lock_flags;
886
887 nd = (struct nd_struct *)(file->private_data);
888 if (!nd)
889 goto done;
890
891 /* TODO : historical locking placeholder */
892 /*
893 * In the HPUX version of the RealPort driver (which served as a basis
894 * for this driver) this locking code was used. Saved if ever we need
895 * to review the locking under Linux.
896 */
897 /* spinlock(&nd->nd_lock); */
898
899
900 /*
901 * Grab the NET lock.
902 */
903 down(&nd->nd_net_semaphore);
904
905 /*
906 * Before "closing" the internal connection, make sure all
907 * ports are "idle".
908 */
909 dgrp_net_idle(nd);
910
911 nd->nd_state = NS_CLOSED;
912 nd->nd_flag = 0;
913
914 /*
915 * TODO ... must the wait queue be reset on close?
916 * should any pending waiters be reset?
917 * Let's decide to assert that the waitq is empty... and see
918 * how soon we break.
919 */
920 if (waitqueue_active(&nd->nd_tx_waitq))
921 pr_info("%s - expected waitqueue_active to be false\n",
922 __func__);
923
924 nd->nd_send = 0;
925
926 kfree(nd->nd_iobuf);
927 nd->nd_iobuf = NULL;
928
929 /* TODO : historical locking placeholder */
930 /*
931 * In the HPUX version of the RealPort driver (which served as a basis
932 * for this driver) this locking code was used. Saved if ever we need
933 * to review the locking under Linux.
934 */
935 /* spinunlock( &nd->nd_lock ); */
936
937
938 kfree(nd->nd_writebuf);
939 nd->nd_writebuf = NULL;
940
941 kfree(nd->nd_inputbuf);
942 nd->nd_inputbuf = NULL;
943
944 kfree(nd->nd_inputflagbuf);
945 nd->nd_inputflagbuf = NULL;
946
947 /* TODO : historical locking placeholder */
948 /*
949 * In the HPUX version of the RealPort driver (which served as a basis
950 * for this driver) this locking code was used. Saved if ever we need
951 * to review the locking under Linux.
952 */
953 /* spinlock(&nd->nd_lock); */
954
955 /*
956 * Set the active port count to zero.
957 */
958 dgrp_chan_count(nd, 0);
959
960 /* TODO : historical locking placeholder */
961 /*
962 * In the HPUX version of the RealPort driver (which served as a basis
963 * for this driver) this locking code was used. Saved if ever we need
964 * to review the locking under Linux.
965 */
966 /* spinunlock(&nd->nd_lock); */
967
968 /*
969 * Release the NET lock.
970 */
971 up(&nd->nd_net_semaphore);
972
973 /*
974 * Cause the poller to stop scheduling itself if this is
975 * the last active node.
976 */
977 spin_lock_irqsave(&dgrp_poll_data.poll_lock, lock_flags);
978
979 if (dgrp_poll_data.node_active_count == 2) {
980 del_timer(&dgrp_poll_data.timer);
981 dgrp_poll_data.node_active_count = 0;
982 }
983
984 spin_unlock_irqrestore(&dgrp_poll_data.poll_lock, lock_flags);
985
986 down(&nd->nd_net_semaphore);
987
988 dgrp_monitor_message(nd, "Net Close");
989
990 up(&nd->nd_net_semaphore);
991
992 done:
993 module_put(THIS_MODULE);
994 file->private_data = NULL;
995 return 0;
996 }
997
998 /* used in dgrp_send to set up the command header */
999 static inline u8 *set_cmd_header(u8 *b, u8 port, u8 cmd)
1000 {
1001 *b++ = 0xb0 + (port & 0x0f);
1002 *b++ = cmd;
1003 return b;
1004 }
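/*
 * For example (illustrative only), set_cmd_header(b, 3, 10) emits the two
 * bytes 0xb3 0x0a -- a command addressed to port 3 of the currently
 * selected module, command code 10 (used below for open/close requests) --
 * and returns a pointer just past them, ready for any command arguments.
 */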
1005
1006 /**
1007 * dgrp_send() -- build a packet for transmission to the server
1008 * @nd: pointer to a node structure
1009 * @tmax: maximum bytes to transmit
1010 *
1011 * returns number of bytes sent
1012 */
1013 static int dgrp_send(struct nd_struct *nd, long tmax)
1014 {
1015 struct ch_struct *ch = nd->nd_chan;
1016 u8 *b;
1017 u8 *buf;
1018 u8 *mbuf;
1019 u8 port;
1020 int mod;
1021 long send;
1022 int maxport;
1023 long lastport = -1;
1024 ushort rwin;
1025 long in;
1026 ushort n;
1027 long t;
1028 long ttotal;
1029 long tchan;
1030 long tsend;
1031 ushort tsafe;
1032 long work;
1033 long send_sync;
1034 long wanted_sync_port = -1;
1035 ushort tdata[CHAN_MAX];
1036 long used_buffer;
1037
1038 mbuf = nd->nd_iobuf + UIO_BASE;
1039 buf = b = mbuf;
1040
1041 send_sync = nd->nd_link.lk_slow_rate < UIO_MAX;
1042
1043 ttotal = 0;
1044 tchan = 0;
1045
1046 memset(tdata, 0, sizeof(tdata));
1047
1048
1049 /*
1050 * If there are any outstanding requests to be serviced,
1051 * service them here.
1052 */
1053 if (nd->nd_send & NR_PASSWORD) {
1054
1055 /*
1056 * Send Password response.
1057 */
1058
1059 b[0] = 0xfc;
1060 b[1] = 0x20;
1061 put_unaligned_be16(strlen(nd->password), b + 2);
1062 b += 4;
memcpy(b, nd->password, strlen(nd->password));
1063 b += strlen(nd->password);
1064 nd->nd_send &= ~(NR_PASSWORD);
1065 }
1066
1067
1068 /*
1069 * Loop over all modules to generate commands, and determine
1070 * the amount of data queued for transmit.
1071 */
1072
1073 for (mod = 0, port = 0; port < nd->nd_chan_count; mod++) {
1074 /*
1075 * If this is not the current module, enter a module select
1076 * code in the buffer.
1077 */
1078
1079 if (mod != nd->nd_tx_module)
1080 mbuf = ++b;
1081
1082 /*
1083 * Loop to process one module.
1084 */
1085
1086 maxport = port + 16;
1087
1088 if (maxport > nd->nd_chan_count)
1089 maxport = nd->nd_chan_count;
1090
1091 for (; port < maxport; port++, ch++) {
1092 /*
1093 * Switch based on channel state.
1094 */
1095
1096 switch (ch->ch_state) {
1097 /*
1098 * Send requests when the port is closed, and there
1099 * are no Open, Close or Cancel requests expected.
1100 */
1101
1102 case CS_IDLE:
1103 /*
1104 * Wait until any open error code
1105 * has been delivered to all
1106 * associated ports.
1107 */
1108
1109 if (ch->ch_open_error) {
1110 if (ch->ch_wait_count[ch->ch_otype]) {
1111 work = 1;
1112 break;
1113 }
1114
1115 ch->ch_open_error = 0;
1116 }
1117
1118 /*
1119 * Wait until the channel HANGUP flag is reset
1120 * before sending the first open. We can only
1121 * get to this state after a server disconnect.
1122 */
1123
1124 if ((ch->ch_flag & CH_HANGUP) != 0)
1125 break;
1126
1127 /*
1128 * If recovering from a TCP disconnect, or if
1129 * there is an immediate open pending, send an
1130 * Immediate Open request.
1131 */
1132 if ((ch->ch_flag & CH_PORT_GONE) ||
1133 ch->ch_wait_count[OTYPE_IMMEDIATE] != 0) {
1134 b = set_cmd_header(b, port, 10);
1135 *b++ = 0;
1136
1137 ch->ch_state = CS_WAIT_OPEN;
1138 ch->ch_otype = OTYPE_IMMEDIATE;
1139 break;
1140 }
1141
1142 /*
1143 * If there is no Persistent or Incoming Open on the wait
1144 * list in the server, and a thread is waiting for a
1145 * Persistent or Incoming Open, send a Persistent or Incoming
1146 * Open Request.
1147 */
1148 if (ch->ch_otype_waiting == 0) {
1149 if (ch->ch_wait_count[OTYPE_PERSISTENT] != 0) {
1150 b = set_cmd_header(b, port, 10);
1151 *b++ = 1;
1152
1153 ch->ch_state = CS_WAIT_OPEN;
1154 ch->ch_otype = OTYPE_PERSISTENT;
1155 } else if (ch->ch_wait_count[OTYPE_INCOMING] != 0) {
1156 b = set_cmd_header(b, port, 10);
1157 *b++ = 2;
1158
1159 ch->ch_state = CS_WAIT_OPEN;
1160 ch->ch_otype = OTYPE_INCOMING;
1161 }
1162 break;
1163 }
1164
1165 /*
1166 * If a Persistent or Incoming Open is pending in
1167 * the server, but there is no longer an open
1168 * thread waiting for it, cancel the request.
1169 */
1170
1171 if (ch->ch_wait_count[ch->ch_otype_waiting] == 0) {
1172 b = set_cmd_header(b, port, 10);
1173 *b++ = 4;
1174
1175 ch->ch_state = CS_WAIT_CANCEL;
1176 ch->ch_otype = ch->ch_otype_waiting;
1177 }
1178 break;
1179
1180 /*
1181 * Send port parameter queries.
1182 */
1183 case CS_SEND_QUERY:
1184 /*
1185 * Clear out all FEP state that might remain
1186 * from the last connection.
1187 */
1188
1189 ch->ch_flag |= CH_PARAM;
1190
1191 ch->ch_flag &= ~CH_RX_FLUSH;
1192
1193 ch->ch_expect = 0;
1194
1195 ch->ch_s_tin = 0;
1196 ch->ch_s_tpos = 0;
1197 ch->ch_s_tsize = 0;
1198 ch->ch_s_treq = 0;
1199 ch->ch_s_elast = 0;
1200
1201 ch->ch_s_rin = 0;
1202 ch->ch_s_rwin = 0;
1203 ch->ch_s_rsize = 0;
1204
1205 ch->ch_s_tmax = 0;
1206 ch->ch_s_ttime = 0;
1207 ch->ch_s_rmax = 0;
1208 ch->ch_s_rtime = 0;
1209 ch->ch_s_rlow = 0;
1210 ch->ch_s_rhigh = 0;
1211
1212 ch->ch_s_brate = 0;
1213 ch->ch_s_iflag = 0;
1214 ch->ch_s_cflag = 0;
1215 ch->ch_s_oflag = 0;
1216 ch->ch_s_xflag = 0;
1217
1218 ch->ch_s_mout = 0;
1219 ch->ch_s_mflow = 0;
1220 ch->ch_s_mctrl = 0;
1221 ch->ch_s_xon = 0;
1222 ch->ch_s_xoff = 0;
1223 ch->ch_s_lnext = 0;
1224 ch->ch_s_xxon = 0;
1225 ch->ch_s_xxoff = 0;
1226
1227 /* Send Sequence Request */
1228 b = set_cmd_header(b, port, 14);
1229
1230 /* Configure Event Conditions Packet */
1231 b = set_cmd_header(b, port, 42);
1232 put_unaligned_be16(0x02c0, b);
1233 b += 2;
1234 *b++ = (DM_DTR | DM_RTS | DM_CTS |
1235 DM_DSR | DM_RI | DM_CD);
1236
1237 /* Send Status Request */
1238 b = set_cmd_header(b, port, 16);
1239
1240 /* Send Buffer Request */
1241 b = set_cmd_header(b, port, 20);
1242
1243 /* Send Port Capability Request */
1244 b = set_cmd_header(b, port, 22);
1245
1246 ch->ch_expect = (RR_SEQUENCE |
1247 RR_STATUS |
1248 RR_BUFFER |
1249 RR_CAPABILITY);
1250
1251 ch->ch_state = CS_WAIT_QUERY;
1252
1253 /* Raise modem signals */
1254 b = set_cmd_header(b, port, 44);
1255
1256 if (ch->ch_flag & CH_PORT_GONE)
1257 ch->ch_s_mout = ch->ch_mout;
1258 else
1259 ch->ch_s_mout = ch->ch_mout = DM_DTR | DM_RTS;
1260
1261 *b++ = ch->ch_mout;
1262 *b++ = ch->ch_s_mflow = 0;
1263 *b++ = ch->ch_s_mctrl = ch->ch_mctrl = 0;
1264
1265 if (ch->ch_flag & CH_PORT_GONE)
1266 ch->ch_flag &= ~CH_PORT_GONE;
1267
1268 break;
1269
1270 /*
1271 * Handle normal open and ready mode.
1272 */
1273
1274 case CS_READY:
1275
1276 /*
1277 * If the port is not open, and there are
1278 * no longer any ports requesting an open,
1279 * then close the port.
1280 */
1281
1282 if (ch->ch_open_count == 0 &&
1283 ch->ch_wait_count[ch->ch_otype] == 0) {
1284 goto send_close;
1285 }
1286
1287 /*
1288 * Process waiting input.
1289 *
1290 * If there is no one to read it, discard the data.
1291 *
1292 * Otherwise if we are not in fastcook mode, or if there is a
1293 * fastcook thread waiting for data, send the data to the
1294 * line discipline.
1295 */
1296 if (ch->ch_rin != ch->ch_rout) {
1297 if (ch->ch_tun.un_open_count == 0 ||
1298 (ch->ch_tun.un_flag & UN_CLOSING) ||
1299 (ch->ch_cflag & CF_CREAD) == 0) {
1300 ch->ch_rout = ch->ch_rin;
1301 } else if ((ch->ch_flag & CH_FAST_READ) == 0 ||
1302 ch->ch_inwait != 0) {
1303 dgrp_input(ch);
1304
1305 if (ch->ch_rin != ch->ch_rout)
1306 work = 1;
1307 }
1308 }
1309
1310 /*
1311 * Handle receive flush, and changes to
1312 * server port parameters.
1313 */
1314
1315 if (ch->ch_flag & (CH_RX_FLUSH | CH_PARAM)) {
1316 /*
1317 * If we are in receive flush mode,
1318 * and enough data has gone by, reset
1319 * receive flush mode.
1320 */
1321 if (ch->ch_flag & CH_RX_FLUSH) {
1322 if (((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >
1323 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK))
1324 ch->ch_flag &= ~CH_RX_FLUSH;
1325 else
1326 work = 1;
1327 }
1328
1329 /*
1330 * Send TMAX, TTIME.
1331 */
1332
1333 if (ch->ch_s_tmax != ch->ch_tmax ||
1334 ch->ch_s_ttime != ch->ch_ttime) {
1335 b = set_cmd_header(b, port, 48);
1336
1337 ch->ch_s_tmax = ch->ch_tmax;
1338 ch->ch_s_ttime = ch->ch_ttime;
1339
1340 put_unaligned_be16(ch->ch_s_tmax,
1341 b);
1342 b += 2;
1343
1344 put_unaligned_be16(ch->ch_s_ttime,
1345 b);
1346 b += 2;
1347 }
1348
1349 /*
1350 * Send RLOW, RHIGH.
1351 */
1352
1353 if (ch->ch_s_rlow != ch->ch_rlow ||
1354 ch->ch_s_rhigh != ch->ch_rhigh) {
1355 b = set_cmd_header(b, port, 45);
1356
1357 ch->ch_s_rlow = ch->ch_rlow;
1358 ch->ch_s_rhigh = ch->ch_rhigh;
1359
1360 put_unaligned_be16(ch->ch_s_rlow,
1361 b);
1362 b += 2;
1363
1364 put_unaligned_be16(ch->ch_s_rhigh,
1365 b);
1366 b += 2;
1367 }
1368
1369 /*
1370 * Send BRATE, CFLAG, IFLAG,
1371 * OFLAG, XFLAG.
1372 */
1373
1374 if (ch->ch_s_brate != ch->ch_brate ||
1375 ch->ch_s_cflag != ch->ch_cflag ||
1376 ch->ch_s_iflag != ch->ch_iflag ||
1377 ch->ch_s_oflag != ch->ch_oflag ||
1378 ch->ch_s_xflag != ch->ch_xflag) {
1379 b = set_cmd_header(b, port, 40);
1380
1381 ch->ch_s_brate = ch->ch_brate;
1382 ch->ch_s_cflag = ch->ch_cflag;
1383 ch->ch_s_iflag = ch->ch_iflag;
1384 ch->ch_s_oflag = ch->ch_oflag;
1385 ch->ch_s_xflag = ch->ch_xflag;
1386
1387 put_unaligned_be16(ch->ch_s_brate,
1388 b);
1389 b += 2;
1390
1391 put_unaligned_be16(ch->ch_s_cflag,
1392 b);
1393 b += 2;
1394
1395 put_unaligned_be16(ch->ch_s_iflag,
1396 b);
1397 b += 2;
1398
1399 put_unaligned_be16(ch->ch_s_oflag,
1400 b);
1401 b += 2;
1402
1403 put_unaligned_be16(ch->ch_s_xflag,
1404 b);
1405 b += 2;
1406 }
1407
1408 /*
1409 * Send MOUT, MFLOW, MCTRL.
1410 */
1411
1412 if (ch->ch_s_mout != ch->ch_mout ||
1413 ch->ch_s_mflow != ch->ch_mflow ||
1414 ch->ch_s_mctrl != ch->ch_mctrl) {
1415 b = set_cmd_header(b, port, 44);
1416
1417 *b++ = ch->ch_s_mout = ch->ch_mout;
1418 *b++ = ch->ch_s_mflow = ch->ch_mflow;
1419 *b++ = ch->ch_s_mctrl = ch->ch_mctrl;
1420 }
1421
1422 /*
1423 * Send Flow control characters.
1424 */
1425
1426 if (ch->ch_s_xon != ch->ch_xon ||
1427 ch->ch_s_xoff != ch->ch_xoff ||
1428 ch->ch_s_lnext != ch->ch_lnext ||
1429 ch->ch_s_xxon != ch->ch_xxon ||
1430 ch->ch_s_xxoff != ch->ch_xxoff) {
1431 b = set_cmd_header(b, port, 46);
1432
1433 *b++ = ch->ch_s_xon = ch->ch_xon;
1434 *b++ = ch->ch_s_xoff = ch->ch_xoff;
1435 *b++ = ch->ch_s_lnext = ch->ch_lnext;
1436 *b++ = ch->ch_s_xxon = ch->ch_xxon;
1437 *b++ = ch->ch_s_xxoff = ch->ch_xxoff;
1438 }
1439
1440 /*
1441 * Send RMAX, RTIME.
1442 */
1443
1444 if (ch->ch_s_rmax != ch->ch_rmax ||
1445 ch->ch_s_rtime != ch->ch_rtime) {
1446 b = set_cmd_header(b, port, 47);
1447
1448 ch->ch_s_rmax = ch->ch_rmax;
1449 ch->ch_s_rtime = ch->ch_rtime;
1450
1451 put_unaligned_be16(ch->ch_s_rmax,
1452 b);
1453 b += 2;
1454
1455 put_unaligned_be16(ch->ch_s_rtime,
1456 b);
1457 b += 2;
1458 }
1459
1460 ch->ch_flag &= ~CH_PARAM;
1461 wake_up_interruptible(&ch->ch_flag_wait);
1462 }
1463
1464
1465 /*
1466 * Handle action commands.
1467 */
1468
1469 if (ch->ch_send != 0) {
1470 /* int send = ch->ch_send & ~ch->ch_expect; */
1471 send = ch->ch_send & ~ch->ch_expect;
1472
1473 /* Send character immediate */
1474 if ((send & RR_TX_ICHAR) != 0) {
1475 b = set_cmd_header(b, port, 60);
1476
1477 *b++ = ch->ch_xon;
1478 ch->ch_expect |= RR_TX_ICHAR;
1479 }
1480
1481 /* BREAK request */
1482 if ((send & RR_TX_BREAK) != 0) {
1483 if (ch->ch_break_time != 0) {
1484 b = set_cmd_header(b, port, 61);
1485 put_unaligned_be16(ch->ch_break_time,
1486 b);
1487 b += 2;
1488
1489 ch->ch_expect |= RR_TX_BREAK;
1490 ch->ch_break_time = 0;
1491 } else {
1492 ch->ch_send &= ~RR_TX_BREAK;
1493 ch->ch_flag &= ~CH_TX_BREAK;
1494 wake_up_interruptible(&ch->ch_flag_wait);
1495 }
1496 }
1497
1498 /*
1499 * Flush input/output buffers.
1500 */
1501
1502 if ((send & (RR_RX_FLUSH | RR_TX_FLUSH)) != 0) {
1503 b = set_cmd_header(b, port, 62);
1504
1505 *b++ = ((send & RR_TX_FLUSH) == 0 ? 1 :
1506 (send & RR_RX_FLUSH) == 0 ? 2 : 3);
1507
1508 if (send & RR_RX_FLUSH) {
1509 ch->ch_flush_seq = nd->nd_seq_in;
1510 ch->ch_flag |= CH_RX_FLUSH;
1511 work = 1;
1512 send_sync = 1;
1513 wanted_sync_port = port;
1514 }
1515
1516 ch->ch_send &= ~(RR_RX_FLUSH | RR_TX_FLUSH);
1517 }
1518
1519 /* Pause input/output */
1520 if ((send & (RR_RX_STOP | RR_TX_STOP)) != 0) {
1521 b = set_cmd_header(b, port, 63);
1522 *b = 0;
1523
1524 if ((send & RR_TX_STOP) != 0)
1525 *b |= EV_OPU;
1526
1527 if ((send & RR_RX_STOP) != 0)
1528 *b |= EV_IPU;
1529
1530 b++;
1531
1532 ch->ch_send &= ~(RR_RX_STOP | RR_TX_STOP);
1533 }
1534
1535 /* Start input/output */
1536 if ((send & (RR_RX_START | RR_TX_START)) != 0) {
1537 b = set_cmd_header(b, port, 64);
1538 *b = 0;
1539
1540 if ((send & RR_TX_START) != 0)
1541 *b |= EV_OPU | EV_OPS | EV_OPX;
1542
1543 if ((send & RR_RX_START) != 0)
1544 *b |= EV_IPU | EV_IPS;
1545
1546 b++;
1547
1548 ch->ch_send &= ~(RR_RX_START | RR_TX_START);
1549 }
1550 }
1551
1552
1553 /*
1554 * Send a window sequence to acknowledge received data.
1555 */
1556
1557 rwin = (ch->ch_s_rin +
1558 ((ch->ch_rout - ch->ch_rin - 1) & RBUF_MASK));
1559
1560 n = (rwin - ch->ch_s_rwin) & 0xffff;
1561
1562 if (n >= RBUF_MAX / 4) {
1563 b[0] = 0xa0 + (port & 0xf);
1564 ch->ch_s_rwin = rwin;
1565 put_unaligned_be16(rwin, b + 1);
1566 b += 3;
1567 }
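/*
 * Illustrative example: if port 2 needs to advertise a new receive
 * window of 0x1234, the code above appends the three bytes
 * 0xa2 0x12 0x34.
 */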
1568
1569 /*
1570 * If the terminal is waiting on LOW
1571 * water or EMPTY, and the condition
1572 * is now satisfied, call the line
1573 * discipline to put more data in the
1574 * buffer.
1575 */
1576
1577 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1578
1579 if ((ch->ch_tun.un_flag & (UN_EMPTY|UN_LOW)) != 0) {
1580 if ((ch->ch_tun.un_flag & UN_LOW) != 0 ?
1581 (n <= TBUF_LOW) :
1582 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin)) {
1583 ch->ch_tun.un_flag &= ~(UN_EMPTY|UN_LOW);
1584
1585 if (waitqueue_active(&((ch->ch_tun.un_tty)->write_wait)))
1586 wake_up_interruptible(&((ch->ch_tun.un_tty)->write_wait));
1587 tty_wakeup(ch->ch_tun.un_tty);
1588 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1589 }
1590 }
1591
1592 /*
1593 * If the printer is waiting on LOW
1594 * water, TIME, EMPTY or PWAIT, and is
1595 * now ready to put more data in the
1596 * buffer, call the line discipline to
1597 * do the job.
1598 */
1599
1600 /* FIXME: jiffies - ch->ch_waketime can never
1601 be < 0. Someone needs to work out what is
1602 actually intended here */
1603 if (ch->ch_pun.un_open_count &&
1604 (ch->ch_pun.un_flag &
1605 (UN_EMPTY|UN_TIME|UN_LOW|UN_PWAIT)) != 0) {
1606
1607 if ((ch->ch_pun.un_flag & UN_LOW) != 0 ?
1608 (n <= TBUF_LOW) :
1609 (ch->ch_pun.un_flag & UN_TIME) != 0 ?
1610 time_is_before_jiffies(ch->ch_waketime) :
1611 (n == 0 && ch->ch_s_tpos == ch->ch_s_tin) &&
1612 ((ch->ch_pun.un_flag & UN_EMPTY) != 0 ||
1613 ((ch->ch_tun.un_open_count &&
1614 ch->ch_tun.un_tty->ops->chars_in_buffer) ?
1615 (ch->ch_tun.un_tty->ops->chars_in_buffer)(ch->ch_tun.un_tty) == 0
1616 : 1
1617 )
1618 )) {
1619 ch->ch_pun.un_flag &= ~(UN_EMPTY | UN_TIME | UN_LOW | UN_PWAIT);
1620
1621 if (waitqueue_active(&((ch->ch_pun.un_tty)->write_wait)))
1622 wake_up_interruptible(&((ch->ch_pun.un_tty)->write_wait));
1623 tty_wakeup(ch->ch_pun.un_tty);
1624 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1625
1626 } else if ((ch->ch_pun.un_flag & UN_TIME) != 0) {
1627 work = 1;
1628 }
1629 }
1630
1631
1632 /*
1633 * Determine the max number of bytes
1634 * this port can send, including
1635 * packet header overhead.
1636 */
1637
1638 t = ((ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff);
1639
1640 if (n > t)
1641 n = t;
1642
1643 if (n != 0) {
1644 n += (n <= 8 ? 1 : n <= 255 ? 2 : 3);
1645
1646 tdata[tchan++] = n;
1647 ttotal += n;
1648 }
1649 break;
1650
1651 /*
1652 * Close the port.
1653 */
1654
1655 send_close:
1656 case CS_SEND_CLOSE:
1657 b = set_cmd_header(b, port, 10);
1658 if (ch->ch_otype == OTYPE_IMMEDIATE)
1659 *b++ = 3;
1660 else
1661 *b++ = 4;
1662
1663 ch->ch_state = CS_WAIT_CLOSE;
1664 break;
1665
1666 /*
1667 * Wait for a previous server request.
1668 */
1669
1670 case CS_WAIT_OPEN:
1671 case CS_WAIT_CANCEL:
1672 case CS_WAIT_FAIL:
1673 case CS_WAIT_QUERY:
1674 case CS_WAIT_CLOSE:
1675 break;
1676
1677 default:
1678 pr_info("%s - unexpected channel state (%i)\n",
1679 __func__, ch->ch_state);
1680 }
1681 }
1682
1683 /*
1684 * If a module select code is needed, drop one in. If space
1685 * was reserved for one, but none is needed, recover the space.
1686 */
1687
1688 if (mod != nd->nd_tx_module) {
1689 if (b != mbuf) {
1690 mbuf[-1] = 0xf0 | mod;
1691 nd->nd_tx_module = mod;
1692 } else {
1693 b--;
1694 }
1695 }
1696 }
1697
1698 /*
1699 * Adjust "tmax" so that under worst case conditions we do
1700 * not overflow either the daemon buffer or the internal
1701 * buffer in the loop that follows. Leave a safe area
1702 * of 64 bytes so we start getting asserts before we start
1703 * losing data or clobbering memory.
1704 */
1705
1706 n = UIO_MAX - UIO_BASE;
1707
1708 if (tmax > n)
1709 tmax = n;
1710
1711 tmax -= 64;
1712
1713 tsafe = tmax;
1714
1715 /*
1716 * Allocate space for 5 Module Selects, 1 Sequence Request,
1717 * and 1 Set TREQ for each active channel.
1718 */
1719
1720 tmax -= 5 + 3 + 4 * nd->nd_chan_count;
1721
1722 /*
1723 * Further reduce "tmax" to the available transmit credit.
1724 * Note that this is a soft constraint; The transmit credit
1725 * can go negative for a time and then recover.
1726 */
1727
1728 n = nd->nd_tx_deposit - nd->nd_tx_charge - nd->nd_link.lk_header_size;
1729
1730 if (tmax > n)
1731 tmax = n;
1732
1733 /*
1734 * Finally reduce tmax by the number of bytes already in
1735 * the buffer.
1736 */
1737
1738 tmax -= b - buf;
1739
1740 /*
1741 * Suspend data transmit unless every ready channel can send
1742 * at least 1 character.
1743 */
1744 if (tmax < 2 * nd->nd_chan_count) {
1745 tsend = 1;
1746
1747 } else if (tchan > 1 && ttotal > tmax) {
1748
1749 /*
1750 * If transmit is limited by the credit budget, find the
1751 * largest number of characters we can send without driving
1752 * the credit negative.
1753 */
1754
1755 long tm = tmax;
1756 int tc = tchan;
1757 int try;
1758
1759 tsend = tm / tc;
1760
1761 for (try = 0; try < 3; try++) {
1762 int i;
1763 int c = 0;
1764
1765 for (i = 0; i < tc; i++) {
1766 if (tsend < tdata[i])
1767 tdata[c++] = tdata[i];
1768 else
1769 tm -= tdata[i];
1770 }
1771
1772 if (c == tc)
1773 break;
1774
1775 tsend = tm / c;
1776
1777 if (c == 1)
1778 break;
1779
1780 tc = c;
1781 }
1782
1783 tsend = tm / nd->nd_chan_count;
1784
1785 if (tsend < 2)
1786 tsend = 1;
1787
1788 } else {
1789 /*
1790 * If no budgetary constraints, or only one channel ready
1791 * to send, set the character limit to the remaining
1792 * buffer size.
1793 */
1794
1795 tsend = tmax;
1796 }
1797
1798 tsend -= (tsend <= 9) ? 1 : (tsend <= 257) ? 2 : 3;
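/*
 * Example of the adjustment above (illustrative): a per-channel allowance
 * of 200 bytes becomes 198, leaving room for 198 data bytes plus the
 * 2-byte packet header used for payloads of 9 to 255 bytes.
 */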
1799
1800 /*
1801 * Loop over all channels, sending queued data.
1802 */
1803
1804 port = 0;
1805 ch = nd->nd_chan;
1806 used_buffer = tmax;
1807
1808 for (mod = 0; port < nd->nd_chan_count; mod++) {
1809 /*
1810 * If this is not the current module, enter a module select
1811 * code in the buffer.
1812 */
1813
1814 if (mod != nd->nd_tx_module)
1815 mbuf = ++b;
1816
1817 /*
1818 * Loop to process one module.
1819 */
1820
1821 maxport = port + 16;
1822
1823 if (maxport > nd->nd_chan_count)
1824 maxport = nd->nd_chan_count;
1825
1826 for (; port < maxport; port++, ch++) {
1827 if (ch->ch_state != CS_READY)
1828 continue;
1829
1830 lastport = port;
1831
1832 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1833
1834 /*
1835 * If there is data that can be sent, send it.
1836 */
1837
1838 if (n != 0 && used_buffer > 0) {
1839 t = (ch->ch_s_tsize + ch->ch_s_tpos - ch->ch_s_tin) & 0xffff;
1840
1841 if (n > t)
1842 n = t;
1843
1844 if (n > tsend) {
1845 work = 1;
1846 n = tsend;
1847 }
1848
1849 if (n > used_buffer) {
1850 work = 1;
1851 n = used_buffer;
1852 }
1853
1854 if (n <= 0)
1855 continue;
1856
1857 /*
1858 * Create the correct size transmit header,
1859 * depending on the amount of data to transmit.
1860 */
1861
1862 if (n <= 8) {
1863
1864 b[0] = ((n - 1) << 4) + (port & 0xf);
1865 b += 1;
1866
1867 } else if (n <= 255) {
1868
1869 b[0] = 0x80 + (port & 0xf);
1870 b[1] = n;
1871 b += 2;
1872
1873 } else {
1874
1875 b[0] = 0x90 + (port & 0xf);
1876 put_unaligned_be16(n, b + 1);
1877 b += 3;
1878 }
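/*
 * Transmit header examples for the three encodings above (illustrative):
 * port 2 with n == 5 bytes gives the single byte 0x42 ((5 - 1) << 4 | 2);
 * n == 100 gives 0x82 0x64; n == 1000 gives 0x92 0x03 0xe8.
 */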
1879
1880 ch->ch_s_tin = (ch->ch_s_tin + n) & 0xffff;
1881
1882 /*
1883 * Copy transmit data to the packet.
1884 */
1885
1886 t = TBUF_MAX - ch->ch_tout;
1887
1888 if (n >= t) {
1889 memcpy(b, ch->ch_tbuf + ch->ch_tout, t);
1890 b += t;
1891 n -= t;
1892 used_buffer -= t;
1893 ch->ch_tout = 0;
1894 }
1895
1896 memcpy(b, ch->ch_tbuf + ch->ch_tout, n);
1897 b += n;
1898 used_buffer -= n;
1899 ch->ch_tout += n;
1900 n = (ch->ch_tin - ch->ch_tout) & TBUF_MASK;
1901 }
1902
1903 /*
1904 * Wake any terminal unit process waiting in the
1905 * dgrp_write routine for low water.
1906 */
1907
1908 if (n > TBUF_LOW)
1909 continue;
1910
1911 if ((ch->ch_flag & CH_LOW) != 0) {
1912 ch->ch_flag &= ~CH_LOW;
1913 wake_up_interruptible(&ch->ch_flag_wait);
1914 }
1915
1916 /* selwakeup tty_sel */
1917 if (ch->ch_tun.un_open_count) {
1918 struct tty_struct *tty = (ch->ch_tun.un_tty);
1919
1920 if (waitqueue_active(&tty->write_wait))
1921 wake_up_interruptible(&tty->write_wait);
1922
1923 tty_wakeup(tty);
1924 }
1925
1926 if (ch->ch_pun.un_open_count) {
1927 struct tty_struct *tty = (ch->ch_pun.un_tty);
1928
1929 if (waitqueue_active(&tty->write_wait))
1930 wake_up_interruptible(&tty->write_wait);
1931
1932 tty_wakeup(tty);
1933 }
1934
1935 /*
1936 * Do EMPTY processing.
1937 */
1938
1939 if (n != 0)
1940 continue;
1941
1942 if ((ch->ch_flag & (CH_EMPTY | CH_DRAIN)) != 0 ||
1943 (ch->ch_pun.un_flag & UN_EMPTY) != 0) {
1944 /*
1945 * If there is still data in the server, ask the server
1946 * to notify us when it's all gone.
1947 */
1948
1949 if (ch->ch_s_treq != ch->ch_s_tin) {
1950 b = set_cmd_header(b, port, 43);
1951
1952 ch->ch_s_treq = ch->ch_s_tin;
1953 put_unaligned_be16(ch->ch_s_treq,
1954 b);
1955 b += 2;
1956 }
1957
1958 /*
1959 * If there is a thread waiting for buffer empty,
1960 * and we are truly empty, wake the thread.
1961 */
1962
1963 else if ((ch->ch_flag & CH_EMPTY) != 0 &&
1964 (ch->ch_send & RR_TX_BREAK) == 0) {
1965 ch->ch_flag &= ~CH_EMPTY;
1966
1967 wake_up_interruptible(&ch->ch_flag_wait);
1968 }
1969 }
1970 }
1971
1972 /*
1973 * If a module select code is needed, drop one in. If space
1974 * was reserved for one, but none is needed, recover the space.
1975 */
1976
1977 if (mod != nd->nd_tx_module) {
1978 if (b != mbuf) {
1979 mbuf[-1] = 0xf0 | mod;
1980 nd->nd_tx_module = mod;
1981 } else {
1982 b--;
1983 }
1984 }
1985 }
1986
1987 /*
1988 * Send a synchronization sequence associated with the last open
1989 * channel that sent data, and remember the time when the data was
1990 * sent.
1991 */
1992
1993 in = nd->nd_seq_in;
1994
1995 if ((send_sync || nd->nd_seq_wait[in] != 0) && lastport >= 0) {
1996 u8 *bb = b;
1997
1998 /*
1999 * Attempt to use the port that really wanted the sync.
2000 * This gets around a race condition where the "lastport" is in
2001 * the middle of the close() routine, and by the time we
2002 * send this command, it will have already acked the close, and
2003 * thus not send the sync response.
2004 */
2005 if (wanted_sync_port >= 0)
2006 lastport = wanted_sync_port;
2007 /*
2008 * Set a flag just in case the port is in the middle of a close;
2009 * it will not be permitted to actually close until we get a
2010 * sync response, which will clear the flag.
2011 */
2012 ch = nd->nd_chan + lastport;
2013 ch->ch_flag |= CH_WAITING_SYNC;
2014
2015 mod = lastport >> 4;
2016
2017 if (mod != nd->nd_tx_module) {
2018 bb[0] = 0xf0 + mod;
2019 bb += 1;
2020
2021 nd->nd_tx_module = mod;
2022 }
2023
2024 bb = set_cmd_header(bb, lastport, 12);
2025 *bb++ = in;
2026
2027 nd->nd_seq_size[in] = bb - buf;
2028 nd->nd_seq_time[in] = jiffies;
2029
2030 if (++in >= SEQ_MAX)
2031 in = 0;
2032
2033 if (in != nd->nd_seq_out) {
2034 b = bb;
2035 nd->nd_seq_in = in;
2036 nd->nd_unack += b - buf;
2037 }
2038 }
2039
2040 /*
2041 * If there are no open ports, a sync cannot be sent.
2042 * There is nothing left to wait for anyway, so wake any
2043 * thread waiting for an acknowledgement.
2044 */
2045
2046 else if (nd->nd_seq_wait[in] != 0) {
2047 nd->nd_seq_wait[in] = 0;
2048
2049 wake_up_interruptible(&nd->nd_seq_wque[in]);
2050 }
2051
2052 /*
2053 * If there is no traffic for an interval of IDLE_MAX, then
2054 * send a single byte packet.
2055 */
2056
2057 if (b != buf) {
2058 nd->nd_tx_time = jiffies;
2059 } else if ((ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX) {
2060 *b++ = 0xf0 | nd->nd_tx_module;
2061 nd->nd_tx_time = jiffies;
2062 }
2063
2064 n = b - buf;
2065
2066 if (n >= tsafe)
2067 pr_info("%s - n(%i) >= tsafe(%i)\n",
2068 __func__, n, tsafe);
2069
2070 if (tsend < 0)
2071 dgrp_dump(buf, n);
2072
2073 nd->nd_tx_work = work;
2074
2075 return n;
2076 }
2077
2078 /*
2079 * dgrp_net_read()
2080 * Data to be sent TO the PortServer from the "async." half of the driver.
2081 */
2082 static ssize_t dgrp_net_read(struct file *file, char __user *buf, size_t count,
2083 loff_t *ppos)
2084 {
2085 struct nd_struct *nd;
2086 long n;
2087 u8 *local_buf;
2088 u8 *b;
2089 ssize_t rtn;
2090
2091 /*
2092 * Get the node pointer, and quit if it doesn't exist.
2093 */
2094 nd = (struct nd_struct *)(file->private_data);
2095 if (!nd)
2096 return -ENXIO;
2097
2098 if (count < UIO_MIN)
2099 return -EINVAL;
2100
2101 /*
2102 * Only one read/write operation may be in progress at
2103 * any given time.
2104 */
2105
2106 /*
2107 * Grab the NET lock.
2108 */
2109 down(&nd->nd_net_semaphore);
2110
2111 nd->nd_read_count++;
2112
2113 nd->nd_tx_ready = 0;
2114
2115 /*
2116 * Determine the effective size of the buffer.
2117 */
2118
2119 if (nd->nd_remain > UIO_BASE)
2120 pr_info_ratelimited("%s - nd_remain(%i) > UIO_BASE\n",
2121 __func__, nd->nd_remain);
2122
2123 b = local_buf = nd->nd_iobuf + UIO_BASE;
2124
2125 /*
2126 * Generate data according to the node state.
2127 */
2128
2129 switch (nd->nd_state) {
2130 /*
2131 * Initialize the connection.
2132 */
2133
2134 case NS_IDLE:
2135 if (nd->nd_mon_buf)
2136 dgrp_monitor_reset(nd);
2137
2138 /*
2139 * Request a Product ID Packet.
2140 */
2141
2142 b[0] = 0xfb;
2143 b[1] = 0x01;
2144 b += 2;
2145
2146 nd->nd_expect |= NR_IDENT;
2147
2148 /*
2149 * Request a Server Capability ID Response.
2150 */
2151
2152 b[0] = 0xfb;
2153 b[1] = 0x02;
2154 b += 2;
2155
2156 nd->nd_expect |= NR_CAPABILITY;
2157
2158 /*
2159 * Request a Server VPD Response.
2160 */
2161
2162 b[0] = 0xfb;
2163 b[1] = 0x18;
2164 b += 2;
2165
2166 nd->nd_expect |= NR_VPD;
2167
2168 nd->nd_state = NS_WAIT_QUERY;
2169 break;
2170
2171 /*
2172 * We do serious communication with the server only in
2173 * the READY state.
2174 */
2175
2176 case NS_READY:
2177 b = dgrp_send(nd, count) + local_buf;
2178 break;
2179
2180 /*
2181 * Send off an error after receiving a bogus message
2182 * from the server.
2183 */
2184
2185 case NS_SEND_ERROR:
2186 n = strlen(nd->nd_error);
2187
2188 b[0] = 0xff;
2189 b[1] = n;
2190 memcpy(b + 2, nd->nd_error, n);
2191 b += 2 + n;
2192
2193 dgrp_net_idle(nd);
2194 /*
2195 * Set the active port count to zero.
2196 */
2197 dgrp_chan_count(nd, 0);
2198 break;
2199
2200 default:
2201 break;
2202 }
2203
2204 n = b - local_buf;
2205
2206 if (n != 0) {
2207 nd->nd_send_count++;
2208
2209 nd->nd_tx_byte += n + nd->nd_link.lk_header_size;
2210 nd->nd_tx_charge += n + nd->nd_link.lk_header_size;
2211 }
2212
2213 rtn = copy_to_user((void __user *)buf, local_buf, n);
2214 if (rtn) {
2215 rtn = -EFAULT;
2216 goto done;
2217 }
2218
2219 *ppos += n;
2220
2221 rtn = n;
2222
2223 if (nd->nd_mon_buf)
2224 dgrp_monitor_data(nd, RPDUMP_CLIENT, local_buf, n);
2225
2226 /*
2227 * Release the NET lock.
2228 */
2229 done:
2230 up(&nd->nd_net_semaphore);
2231
2232 return rtn;
2233 }
2234
2235 /*
2236 * Common Packet Handling code
2237 */
2238
2239 static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
2240 long dlen, long plen, int n1, u8 *dbuf)
2241 {
2242 char *error;
2243 long n;
2244 long remain;
2245 u8 *buf;
2246 u8 *b;
2247
2248 remain = nd->nd_remain;
2249 nd->nd_tx_work = 1;
2250
2251 /*
2252 * Data should appear only when the channel is
2253 * in the CS_READY state.
2254 */
2255
2256 if (ch->ch_state < CS_READY) {
2257 error = "Data received before RWIN established";
2258 nd->nd_remain = 0;
2259 nd->nd_state = NS_SEND_ERROR;
2260 nd->nd_error = error;
2261 }
2262
2263 /*
2264 * Assure that the data received is within the
2265 * allowable window.
2266 */
2267
2268 n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
2269
2270 if (dlen > n) {
2271 error = "Receive data overrun";
2272 nd->nd_remain = 0;
2273 nd->nd_state = NS_SEND_ERROR;
2274 nd->nd_error = error;
2275 }
2276
2277 /*
2278 * If we received 3 or fewer characters,
2279 * assume it is a human typing, and set RTIME
2280 * to 10 milliseconds.
2281 *
2282 * If we receive 10 or more characters,
2283 * assume it's not a human typing, and set RTIME
2284 * to 100 milliseconds.
2285 */
2286
2287 if (ch->ch_edelay != DGRP_RTIME) {
2288 if (ch->ch_rtime != ch->ch_edelay) {
2289 ch->ch_rtime = ch->ch_edelay;
2290 ch->ch_flag |= CH_PARAM;
2291 }
2292 } else if (dlen <= 3) {
2293 if (ch->ch_rtime != 10) {
2294 ch->ch_rtime = 10;
2295 ch->ch_flag |= CH_PARAM;
2296 }
2297 } else {
2298 if (ch->ch_rtime != DGRP_RTIME) {
2299 ch->ch_rtime = DGRP_RTIME;
2300 ch->ch_flag |= CH_PARAM;
2301 }
2302 }
2303
2304 /*
2305 * If a portion of the packet is outside the
2306 * buffer, shorten the effective length of the
2307 * data packet to be the amount of data received.
2308 */
2309
2310 if (remain < plen)
2311 dlen -= plen - remain;
2312
2313 /*
2314 * Detect if receive flush is now complete.
2315 */
2316
2317 if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
2318 ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
2319 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2320 ch->ch_flag &= ~CH_RX_FLUSH;
2321 }
2322
2323 /*
2324 * If we are ready to receive, move the data into
2325 * the receive buffer.
2326 */
2327
2328 ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
2329
2330 if (ch->ch_state == CS_READY &&
2331 (ch->ch_tun.un_open_count != 0) &&
2332 (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
2333 (ch->ch_cflag & CF_CREAD) != 0 &&
2334 (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
2335 (ch->ch_send & RR_RX_FLUSH) == 0) {
2336
2337 if (ch->ch_rin + dlen >= RBUF_MAX) {
2338 n = RBUF_MAX - ch->ch_rin;
2339
2340 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
2341
2342 ch->ch_rin = 0;
2343 dbuf += n;
2344 dlen -= n;
2345 }
2346
2347 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
2348
2349 ch->ch_rin += dlen;
2350
2351
2352 /*
2353 * If we are not in fastcook mode, or
2354 * if there is a fastcook thread
2355 * waiting for data, send the data to
2356 * the line discipline.
2357 */
2358
2359 if ((ch->ch_flag & CH_FAST_READ) == 0 ||
2360 ch->ch_inwait != 0) {
2361 dgrp_input(ch);
2362 }
2363
2364 /*
2365 * If there is a read thread waiting
2366 * in select, and we are in fastcook
2367 * mode, wake him up.
2368 */
2369
2370 if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
2371 (ch->ch_flag & CH_FAST_READ) != 0)
2372 wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
2373
2374 /*
2375 * Wake any thread waiting in the
2376 * fastcook loop.
2377 */
2378
2379 if ((ch->ch_flag & CH_INPUT) != 0) {
2380 ch->ch_flag &= ~CH_INPUT;
2381 wake_up_interruptible(&ch->ch_flag_wait);
2382 }
2383 }
2384
2385 /*
2386 * Fabricate and insert a data packet header to
2387 * precede the remaining data when it comes in.
2388 */
2389
2390 if (remain < plen) {
2391 dlen = plen - remain;
2392 b = buf = nd->nd_iobuf;
2393
2394 b[0] = 0x90 + n1;
2395 put_unaligned_be16(dlen, b + 1);
2396
2397 remain = 3;
2398 if (remain > 0 && b != buf)
2399 memcpy(buf, b, remain);
2400
2401 nd->nd_remain = remain;
2402 return;
2403 }
2404 }
2405
2406 /**
2407 * dgrp_receive() -- decode data packets received from the remote PortServer.
2408 * @nd: pointer to a node structure
2409 */
2410 static void dgrp_receive(struct nd_struct *nd)
2411 {
2412 struct ch_struct *ch;
2413 u8 *buf;
2414 u8 *b;
2415 u8 *dbuf;
2416 char *error;
2417 long port;
2418 long dlen;
2419 long plen;
2420 long remain;
2421 long n;
2422 long mlast;
2423 long elast;
2424 long mstat;
2425 long estat;
2426
2427 char ID[3];
2428
2429 nd->nd_tx_time = jiffies;
2430
2431 ID_TO_CHAR(nd->nd_ID, ID);
2432
2433 b = buf = nd->nd_iobuf;
2434 remain = nd->nd_remain;
2435
2436 /*
2437 * Loop to process Realport protocol packets.
2438 */
2439
2440 while (remain > 0) {
2441 int n0 = b[0] >> 4;
2442 int n1 = b[0] & 0x0f;
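/*
 * Illustrative example: a leading byte of 0x85 splits into n0 == 8 (a data
 * packet with a 2-byte header, so the data length is in b[1]) and n1 == 5
 * (port 5 within the currently selected RX module).
 */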
2443
2444 if (n0 <= 12) {
2445 port = (nd->nd_rx_module << 4) + n1;
2446
2447 if (port >= nd->nd_chan_count) {
2448 error = "Improper Port Number";
2449 goto prot_error;
2450 }
2451
2452 ch = nd->nd_chan + port;
2453 } else {
2454 port = -1;
2455 ch = NULL;
2456 }
2457
2458 /*
2459 * Process by major packet type.
2460 */
2461
2462 switch (n0) {
2463
2464 /*
2465 * Process 1-byte header data packet.
2466 */
2467
2468 case 0:
2469 case 1:
2470 case 2:
2471 case 3:
2472 case 4:
2473 case 5:
2474 case 6:
2475 case 7:
2476 dlen = n0 + 1;
2477 plen = dlen + 1;
2478
2479 dbuf = b + 1;
2480 handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
2481 break;
2482
2483 /*
2484 * Process 2-byte header data packet.
2485 */
2486
2487 case 8:
2488 if (remain < 3)
2489 goto done;
2490
2491 dlen = b[1];
2492 plen = dlen + 2;
2493
2494 dbuf = b + 2;
2495 handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
2496 break;
2497
2498 /*
2499 * Process 3-byte header data packet.
2500 */
2501
2502 case 9:
2503 if (remain < 4)
2504 goto done;
2505
2506 dlen = get_unaligned_be16(b + 1);
2507 plen = dlen + 3;
2508
2509 dbuf = b + 3;
2510 handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
2511 break;
2512
2513 /*
2514 * Handle Window Sequence packets.
2515 */
2516
2517 case 10:
2518 plen = 3;
2519 if (remain < plen)
2520 goto done;
2521
2522 nd->nd_tx_work = 1;
2523
2524 {
2525 ushort tpos = get_unaligned_be16(b + 1);
2526
2527 ushort ack = (tpos - ch->ch_s_tpos) & 0xffff;
2528 ushort unack = (ch->ch_s_tin - ch->ch_s_tpos) & 0xffff;
2529 ushort notify = (ch->ch_s_treq - ch->ch_s_tpos) & 0xffff;
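/*
 * All three values use 16-bit wraparound arithmetic relative to
 * ch_s_tpos: "ack" is how far the server's transmit pointer just
 * advanced, "unack" is how much data we have sent but not yet seen
 * acknowledged, and "notify" is the offset at which we asked for a
 * window update.  An "ack" larger than "unack" would acknowledge
 * data we never sent, hence the protocol error below.
 */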
2530
2531 if (ch->ch_state < CS_READY || ack > unack) {
2532 error = "Improper Window Sequence";
2533 goto prot_error;
2534 }
2535
2536 ch->ch_s_tpos = tpos;
2537
2538 if (notify <= ack)
2539 ch->ch_s_treq = tpos;
2540 }
2541 break;
2542
2543 /*
2544 * Handle Command response packets.
2545 */
2546
2547 case 11:
2548
2549 /*
2550 * RealPort engine fix - 03/11/2004
2551 *
2552 * This check was not originally here.
2553 *
2554 * We were using b[1] without verifying that the data
2555 * is actually there and valid. On a split packet, it
2556 * might not be yet.
2557 *
2558 * NOTE: I have never actually seen the failure happen
2559 * under Linux, but since I have seen it occur
2560 * under both Solaris and HP-UX, the assumption
2561 * is that it *could* happen here as well...
2562 */
2563 if (remain < 2)
2564 goto done;
2565
2566
2567 switch (b[1]) {
2568
2569 /*
2570 * Handle Open Response.
2571 */
2572
2573 case 11:
2574 plen = 6;
2575 if (remain < plen)
2576 goto done;
2577
2578 nd->nd_tx_work = 1;
2579
2580 {
2581 int req = b[2];
2582 int resp = b[3];
2583 port = get_unaligned_be16(b + 4);
2584
2585 if (port >= nd->nd_chan_count) {
2586 error = "Open channel number out of range";
2587 goto prot_error;
2588 }
2589
2590 ch = nd->nd_chan + port;
2591
2592 /*
2593 * How we handle an open response depends primarily
2594 * on our current channel state.
2595 */
2596
2597 switch (ch->ch_state) {
2598 case CS_IDLE:
2599
2600 /*
2601 * Handle a delayed open.
2602 */
2603
2604 if (ch->ch_otype_waiting != 0 &&
2605 req == ch->ch_otype_waiting &&
2606 resp == 0) {
2607 ch->ch_otype = req;
2608 ch->ch_otype_waiting = 0;
2609 ch->ch_state = CS_SEND_QUERY;
2610 break;
2611 }
2612 goto open_error;
2613
2614 case CS_WAIT_OPEN:
2615
2616 /*
2617 * Handle the open response.
2618 */
2619
2620 if (req == ch->ch_otype) {
2621 switch (resp) {
2622
2623 /*
2624 * On successful response, open the
2625 * port and proceed normally.
2626 */
2627
2628 case 0:
2629 ch->ch_state = CS_SEND_QUERY;
2630 break;
2631
2632 /*
2633 * On a busy response to a persistent open,
2634 * remember that the open is pending.
2635 */
2636
2637 case 1:
2638 case 2:
2639 if (req != OTYPE_IMMEDIATE) {
2640 ch->ch_otype_waiting = req;
2641 ch->ch_state = CS_IDLE;
2642 break;
2643 }
2644
2645 /*
2646 * Otherwise the server open failed. If
2647 * the Unix port is open, hang it up.
2648 */
2649
2650 default:
2651 if (ch->ch_open_count != 0) {
2652 ch->ch_flag |= CH_HANGUP;
2653 dgrp_carrier(ch);
2654 ch->ch_state = CS_IDLE;
2655 break;
2656 }
2657
2658 ch->ch_open_error = resp;
2659 ch->ch_state = CS_IDLE;
2660
2661 wake_up_interruptible(&ch->ch_flag_wait);
2662 }
2663 break;
2664 }
2665
2666 /*
2667 * Handle delayed response arrival preceding
2668 * the open response we are waiting for.
2669 */
2670
2671 if (ch->ch_otype_waiting != 0 &&
2672 req == ch->ch_otype_waiting &&
2673 resp == 0) {
2674 ch->ch_otype = ch->ch_otype_waiting;
2675 ch->ch_otype_waiting = 0;
2676 ch->ch_state = CS_WAIT_FAIL;
2677 break;
2678 }
2679 goto open_error;
2680
2681
2682 case CS_WAIT_FAIL:
2683
2684 /*
2685 * Handle response to immediate open arriving
2686 * after a delayed open success.
2687 */
2688
2689 if (req == OTYPE_IMMEDIATE) {
2690 ch->ch_state = CS_SEND_QUERY;
2691 break;
2692 }
2693 goto open_error;
2694
2695
2696 case CS_WAIT_CANCEL:
2697 /*
2698 * Handle delayed open response arriving before
2699 * the cancel response.
2700 */
2701
2702 if (req == ch->ch_otype_waiting &&
2703 resp == 0) {
2704 ch->ch_otype_waiting = 0;
2705 break;
2706 }
2707
2708 /*
2709 * Handle cancel response.
2710 */
2711
2712 if (req == 4 && resp == 0) {
2713 ch->ch_otype_waiting = 0;
2714 ch->ch_state = CS_IDLE;
2715 break;
2716 }
2717 goto open_error;
2718
2719
2720 case CS_WAIT_CLOSE:
2721 /*
2722 * Handle a successful response to a port
2723 * close.
2724 */
2725
2726 if (req >= 3) {
2727 ch->ch_state = CS_IDLE;
2728 break;
2729 }
2730 goto open_error;
2731
2732 open_error:
2733 default:
2734 {
2735 error = "Improper Open Response";
2736 goto prot_error;
2737 }
2738 }
2739 }
2740 break;
2741
2742 /*
2743 * Handle Synchronize Response.
2744 */
2745
2746 case 13:
2747 plen = 3;
2748 if (remain < plen)
2749 goto done;
2750 {
2751 int seq = b[2];
2752 int s;
2753
2754 /*
2755 * If channel was waiting for this sync response,
2756 * unset the flag, and wake up anyone waiting
2757 * on the event.
2758 */
2759 if (ch->ch_flag & CH_WAITING_SYNC) {
2760 ch->ch_flag &= ~(CH_WAITING_SYNC);
2761 wake_up_interruptible(&ch->ch_flag_wait);
2762 }
2763
2764 if (((seq - nd->nd_seq_out) & SEQ_MASK) >=
2765 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2766 break;
2767 }
2768
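/*
 * Walk every outstanding sequence slot from nd_seq_out up to the
 * acknowledged slot "seq" (modulo SEQ_MASK): wake any writer parked
 * on that slot, return its byte count to the unacknowledged-data
 * budget, then advance nd_seq_out past the acknowledged slot.
 */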
2769 for (s = nd->nd_seq_out;; s = (s + 1) & SEQ_MASK) {
2770 if (nd->nd_seq_wait[s] != 0) {
2771 nd->nd_seq_wait[s] = 0;
2772
2773 wake_up_interruptible(&nd->nd_seq_wque[s]);
2774 }
2775
2776 nd->nd_unack -= nd->nd_seq_size[s];
2777
2778 if (s == seq)
2779 break;
2780 }
2781
2782 nd->nd_seq_out = (seq + 1) & SEQ_MASK;
2783 }
2784 break;
2785
2786 /*
2787 * Handle Sequence Response.
2788 */
2789
2790 case 15:
2791 plen = 6;
2792 if (remain < plen)
2793 goto done;
2794
2795 {
2796 /* Record that we have received the Sequence
2797 * Response, but we aren't interested in the
2798 * sequence numbers. We were using RIN like it
2799 * was ROUT and that was causing problems,
2800 * fixed 7-13-2001 David Fries. See comment in
2801 * drp.h for ch_s_rin variable.
2802 int rin = get_unaligned_be16(b + 2);
2803 int tpos = get_unaligned_be16(b + 4);
2804 */
2805
2806 ch->ch_send &= ~RR_SEQUENCE;
2807 ch->ch_expect &= ~RR_SEQUENCE;
2808 }
2809 goto check_query;
2810
2811 /*
2812 * Handle Status Response.
2813 */
2814
2815 case 17:
2816 plen = 5;
2817 if (remain < plen)
2818 goto done;
2819
2820 {
2821 ch->ch_s_elast = get_unaligned_be16(b + 2);
2822 ch->ch_s_mlast = b[4];
2823
2824 ch->ch_expect &= ~RR_STATUS;
2825 ch->ch_send &= ~RR_STATUS;
2826
2827 /*
2828 * CH_PHYS_CD is cleared because something _could_ be
2829 * waiting for the initial sense of carrier... and if
2830 * carrier is high immediately, we want to be sure to
2831 * wake them as soon as possible.
2832 */
2833 ch->ch_flag &= ~CH_PHYS_CD;
2834
2835 dgrp_carrier(ch);
2836 }
2837 goto check_query;
2838
2839 /*
2840 * Handle Line Error Response.
2841 */
2842
2843 case 19:
2844 plen = 14;
2845 if (remain < plen)
2846 goto done;
2847
2848 break;
2849
2850 /*
2851 * Handle Buffer Response.
2852 */
2853
2854 case 21:
2855 plen = 6;
2856 if (remain < plen)
2857 goto done;
2858
2859 {
2860 ch->ch_s_rsize = get_unaligned_be16(b + 2);
2861 ch->ch_s_tsize = get_unaligned_be16(b + 4);
2862
2863 ch->ch_send &= ~RR_BUFFER;
2864 ch->ch_expect &= ~RR_BUFFER;
2865 }
2866 goto check_query;
2867
2868 /*
2869 * Handle Port Capability Response.
2870 */
2871
2872 case 23:
2873 plen = 32;
2874 if (remain < plen)
2875 goto done;
2876
2877 {
2878 ch->ch_send &= ~RR_CAPABILITY;
2879 ch->ch_expect &= ~RR_CAPABILITY;
2880 }
2881
2882 /*
2883 * When all queries are complete, set those parameters
2884 * derived from the query results, then transition
2885 * to the READY state.
2886 */
2887
2888 check_query:
2889 if (ch->ch_state == CS_WAIT_QUERY &&
2890 (ch->ch_expect & (RR_SEQUENCE |
2891 RR_STATUS |
2892 RR_BUFFER |
2893 RR_CAPABILITY)) == 0) {
2894 ch->ch_tmax = ch->ch_s_tsize / 4;
2895
2896 if (ch->ch_edelay == DGRP_TTIME)
2897 ch->ch_ttime = DGRP_TTIME;
2898 else
2899 ch->ch_ttime = ch->ch_edelay;
2900
2901 ch->ch_rmax = ch->ch_s_rsize / 4;
2902
2903 if (ch->ch_edelay == DGRP_RTIME)
2904 ch->ch_rtime = DGRP_RTIME;
2905 else
2906 ch->ch_rtime = ch->ch_edelay;
2907
2908 ch->ch_rlow = 2 * ch->ch_s_rsize / 8;
2909 ch->ch_rhigh = 6 * ch->ch_s_rsize / 8;
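/*
 * For example, with a reported server receive buffer of 1024 bytes
 * (illustrative value), the flow-control watermarks above work out
 * to rlow = 256 and rhigh = 768, i.e. roughly 1/4 and 3/4 of the
 * buffer.
 */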
2910
2911 ch->ch_state = CS_READY;
2912
2913 nd->nd_tx_work = 1;
2914 wake_up_interruptible(&ch->ch_flag_wait);
2915
2916 }
2917 break;
2918
2919 default:
2920 goto decode_error;
2921 }
2922 break;
2923
2924 /*
2925 * Handle Events.
2926 */
2927
2928 case 12:
2929 plen = 4;
2930 if (remain < plen)
2931 goto done;
2932
2933 mlast = ch->ch_s_mlast;
2934 elast = ch->ch_s_elast;
2935
2936 mstat = ch->ch_s_mlast = b[1];
2937 estat = ch->ch_s_elast = get_unaligned_be16(b + 2);
2938
2939 /*
2940 * Handle modem changes.
2941 */
2942
2943 if (((mstat ^ mlast) & DM_CD) != 0)
2944 dgrp_carrier(ch);
2945
2946
2947 /*
2948 * Handle received break.
2949 */
2950
2951 if ((estat & ~elast & EV_RXB) != 0 &&
2952 (ch->ch_tun.un_open_count != 0) &&
2953 I_BRKINT(ch->ch_tun.un_tty) &&
2954 !(I_IGNBRK(ch->ch_tun.un_tty))) {
2955
2956 tty_buffer_request_room(&ch->port, 1);
2957 tty_insert_flip_char(&ch->port, 0, TTY_BREAK);
2958 tty_flip_buffer_push(&ch->port);
2959
2960 }
2961
2962 /*
2963 * On transmit break complete, if more break traffic
2964 * is waiting then send it. Otherwise wake any threads
2965 * waiting for transmitter empty.
2966 */
2967
2968 if ((~estat & elast & EV_TXB) != 0 &&
2969 (ch->ch_expect & RR_TX_BREAK) != 0) {
2970
2971 nd->nd_tx_work = 1;
2972
2973 ch->ch_expect &= ~RR_TX_BREAK;
2974
2975 if (ch->ch_break_time != 0) {
2976 ch->ch_send |= RR_TX_BREAK;
2977 } else {
2978 ch->ch_send &= ~RR_TX_BREAK;
2979 ch->ch_flag &= ~CH_TX_BREAK;
2980 wake_up_interruptible(&ch->ch_flag_wait);
2981 }
2982 }
2983 break;
2984
2985 case 13:
2986 case 14:
2987 error = "Unrecognized command";
2988 goto prot_error;
2989
2990 /*
2991 * Decode Special Codes.
2992 */
2993
2994 case 15:
2995 switch (n1) {
2996 /*
2997 * One byte module select.
2998 */
2999
3000 case 0:
3001 case 1:
3002 case 2:
3003 case 3:
3004 case 4:
3005 case 5:
3006 case 6:
3007 case 7:
3008 plen = 1;
3009 nd->nd_rx_module = n1;
3010 break;
3011
3012 /*
3013 * Two byte module select.
3014 */
3015
3016 case 8:
3017 plen = 2;
3018 if (remain < plen)
3019 goto done;
3020
3021 nd->nd_rx_module = b[1];
3022 break;
3023
3024 /*
3025 * ID Request packet.
3026 */
3027
3028 case 11:
3029 if (remain < 4)
3030 goto done;
3031
3032 plen = get_unaligned_be16(b + 2);
3033
3034 if (plen < 12 || plen > 1000) {
3035 error = "Response Packet length error";
3036 goto prot_error;
3037 }
3038
3039 nd->nd_tx_work = 1;
3040
3041 switch (b[1]) {
3042 /*
3043 * Echo packet.
3044 */
3045
3046 case 0:
3047 nd->nd_send |= NR_ECHO;
3048 break;
3049
3050 /*
3051 * ID Response packet.
3052 */
3053
3054 case 1:
3055 nd->nd_send |= NR_IDENT;
3056 break;
3057
3058 /*
3059 * Password request packet.
3060 */
3061
3062 case 32:
3063 nd->nd_send |= NR_PASSWORD;
3064 break;
3065
3066 }
3067 break;
3068
3069 /*
3070 * Various node-level response packets.
3071 */
3072
3073 case 12:
3074 if (remain < 4)
3075 goto done;
3076
3077 plen = get_unaligned_be16(b + 2);
3078
3079 if (plen < 4 || plen > 1000) {
3080 error = "Response Packet length error";
3081 goto prot_error;
3082 }
3083
3084 nd->nd_tx_work = 1;
3085
3086 switch (b[1]) {
3087 /*
3088 * Echo packet.
3089 */
3090
3091 case 0:
3092 nd->nd_expect &= ~NR_ECHO;
3093 break;
3094
3095 /*
3096 * Product Response Packet.
3097 */
3098
3099 case 1:
3100 {
3101 int desclen;
3102
3103 nd->nd_hw_ver = (b[8] << 8) | b[9];
3104 nd->nd_sw_ver = (b[10] << 8) | b[11];
3105 nd->nd_hw_id = b[6];
3106 desclen = (plen - 12 > MAX_DESC_LEN - 1) ? MAX_DESC_LEN - 1 :
3107 plen - 12;
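/*
 * The description text starts at offset 12 of the packet; its length
 * is clamped to MAX_DESC_LEN - 1 so the NUL terminator written below
 * always fits.
 */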
3108
3109 if (desclen <= 0) {
3110 error = "Response Packet desclen error";
3111 goto prot_error;
3112 }
3113
3114 strncpy(nd->nd_ps_desc, b + 12, desclen);
3115 nd->nd_ps_desc[desclen] = 0;
3116 }
3117
3118 nd->nd_expect &= ~NR_IDENT;
3119 break;
3120
3121 /*
3122 * Capability Response Packet.
3123 */
3124
3125 case 2:
3126 {
3127 int nn = get_unaligned_be16(b + 4);
3128
3129 if (nn > CHAN_MAX)
3130 nn = CHAN_MAX;
3131
3132 dgrp_chan_count(nd, nn);
3133 }
3134
3135 nd->nd_expect &= ~NR_CAPABILITY;
3136 break;
3137
3138 /*
3139 * VPD Response Packet.
3140 */
3141
3142 case 15:
3143 /*
3144 * NOTE: case 15 is here ONLY because the EtherLite
3145 * is broken, and sends a response to 24 back as 15.
3146 * To resolve this, the EtherLite firmware is now
3147 * fixed to send back 24 correctly, but, for backwards
3148 * compatibility, we now have reserved 15 for the
3149 * bad EtherLite response to 24 as well.
3150 */
3151
3152 /* Fallthru! */
3153
3154 case 24:
3155
3156 /*
3157 * If the product doesn't support VPD,
3158 * it will send back a null IDRESP,
3159 * which is a length of 4 bytes.
3160 */
3161 if (plen > 4) {
3162 memcpy(nd->nd_vpd, b + 4, min(plen - 4, (long) VPDSIZE));
3163 nd->nd_vpd_len = min(plen - 4, (long) VPDSIZE);
3164 }
3165
3166 nd->nd_expect &= ~NR_VPD;
3167 break;
3168
3169 default:
3170 goto decode_error;
3171 }
3172
3173 if (nd->nd_expect == 0 &&
3174 nd->nd_state == NS_WAIT_QUERY) {
3175 nd->nd_state = NS_READY;
3176 }
3177 break;
3178
3179 /*
3180 * Debug packet.
3181 */
3182
3183 case 14:
3184 if (remain < 4)
3185 goto done;
3186
3187 plen = get_unaligned_be16(b + 2) + 4;
3188
3189 if (plen > 1000) {
3190 error = "Debug Packet too large";
3191 goto prot_error;
3192 }
3193
3194 if (remain < plen)
3195 goto done;
3196 break;
3197
3198 /*
3199 * Handle reset packet.
3200 */
3201
3202 case 15:
3203 if (remain < 2)
3204 goto done;
3205
3206 plen = 2 + b[1];
3207
3208 if (remain < plen)
3209 goto done;
3210
3211 nd->nd_tx_work = 1;
3212
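/*
 * The reset packet carries a reason string; the byte just past it is
 * saved and restored around the (now empty) spot where the string
 * was presumably NUL-terminated for logging at one time.
 */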
3213 n = b[plen];
3214 b[plen] = 0;
3215
3216 b[plen] = n;
3217
3218 error = "Client Reset Acknowledge";
3219 goto prot_error;
3220
3221 default:
3222 goto decode_error;
3223 }
3224 break;
3225
3226 default:
3227 goto decode_error;
3228 }
3229
3230 b += plen;
3231 remain -= plen;
3232 }
3233
3234 /*
3235 * When the buffer is exhausted, copy any data left at the
3236 * top of the buffer back down to the bottom for the next
3237 * read request.
3238 */
3239
3240 done:
3241 if (remain > 0 && b != buf)
3242 memcpy(buf, b, remain);
3243
3244 nd->nd_remain = remain;
3245 return;
3246
3247 /*
3248 * Handle a decode error.
3249 */
3250
3251 decode_error:
3252 error = "Protocol decode error";
3253
3254 /*
3255 * Handle a general protocol error.
3256 */
3257
3258 prot_error:
3259 nd->nd_remain = 0;
3260 nd->nd_state = NS_SEND_ERROR;
3261 nd->nd_error = error;
3262 }
3263
3264 /*
3265 * dgrp_net_write() -- write data to the network device.
3266 *
3267 * A zero byte write indicates that the connection to the RealPort
3268 * device has been broken.
3269 *
3270 * A non-zero write indicates data from the RealPort device.
3271 */
3272 static ssize_t dgrp_net_write(struct file *file, const char __user *buf,
3273 size_t count, loff_t *ppos)
3274 {
3275 struct nd_struct *nd;
3276 ssize_t rtn = 0;
3277 long n;
3278 long total = 0;
3279
3280 /*
3281 * Get the node pointer, and quit if it doesn't exist.
3282 */
3283 nd = (struct nd_struct *)(file->private_data);
3284 if (!nd)
3285 return -ENXIO;
3286
3287 /*
3288 * Grab the NET lock.
3289 */
3290 down(&nd->nd_net_semaphore);
3291
3292 nd->nd_write_count++;
3293
3294 /*
3295 * Handle disconnect.
3296 */
3297
3298 if (count == 0) {
3299 dgrp_net_idle(nd);
3300 /*
3301 * Set the active port count to zero.
3302 */
3303 dgrp_chan_count(nd, 0);
3304 goto unlock;
3305 }
3306
3307 /*
3308 * Loop to process entire receive packet.
3309 */
3310
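/*
 * Each pass copies at most the free space left in nd_iobuf (UIO_MAX
 * minus any partial packet carried over in nd_remain), then lets
 * dgrp_receive() consume whole packets and carry any tail forward.
 */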
3311 while (count > 0) {
3312 n = UIO_MAX - nd->nd_remain;
3313
3314 if (n > count)
3315 n = count;
3316
3317 nd->nd_rx_byte += n + nd->nd_link.lk_header_size;
3318
3319 rtn = copy_from_user(nd->nd_iobuf + nd->nd_remain,
3320 (void __user *) buf + total, n);
3321 if (rtn) {
3322 rtn = -EFAULT;
3323 goto unlock;
3324 }
3325
3326 *ppos += n;
3327
3328 total += n;
3329
3330 count -= n;
3331
3332 if (nd->nd_mon_buf)
3333 dgrp_monitor_data(nd, RPDUMP_SERVER,
3334 nd->nd_iobuf + nd->nd_remain, n);
3335
3336 nd->nd_remain += n;
3337
3338 dgrp_receive(nd);
3339 }
3340
3341 rtn = total;
3342
3343 unlock:
3344 /*
3345 * Release the NET lock.
3346 */
3347 up(&nd->nd_net_semaphore);
3348
3349 return rtn;
3350 }
3351
3352
3353 /*
3354 * dgrp_net_select()
3355 * Determine whether a device is ready to be read or written to, and
3356 * sleep if not.
3357 */
3358 static unsigned int dgrp_net_select(struct file *file,
3359 struct poll_table_struct *table)
3360 {
3361 unsigned int retval = 0;
3362 struct nd_struct *nd = file->private_data;
3363
3364 poll_wait(file, &nd->nd_tx_waitq, table);
3365
3366 if (nd->nd_tx_ready)
3367 retval |= POLLIN | POLLRDNORM; /* Conditionally readable */
3368
3369 retval |= POLLOUT | POLLWRNORM; /* Always writeable */
3370
3371 return retval;
3372 }
3373
3374 /*
3375 * dgrp_net_ioctl
3376 *
3377 * Implement those functions which allow the network daemon to control
3378 * the network parameters in the driver. The ioctls include ones to
3379 * get and set the link speed parameters for the PortServer.
3380 */
3381 static long dgrp_net_ioctl(struct file *file, unsigned int cmd,
3382 unsigned long arg)
3383 {
3384 struct nd_struct *nd;
3385 int rtn = 0;
3386 long size = _IOC_SIZE(cmd);
3387 struct link_struct link;
3388
3389 nd = file->private_data;
3390
3391 if (_IOC_DIR(cmd) & _IOC_READ)
3392 rtn = access_ok(VERIFY_WRITE, (void __user *) arg, size);
3393 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3394 rtn = access_ok(VERIFY_READ, (void __user *) arg, size);
3395
3396 if (!rtn)
3397 return rtn;
3398
3399 switch (cmd) {
3400 case DIGI_SETLINK:
3401 if (size != sizeof(struct link_struct))
3402 return -EINVAL;
3403
3404 if (copy_from_user(&link, (void __user *)arg, size))
3405 return -EFAULT;
3406
3407 if (link.lk_fast_rate < 9600)
3408 link.lk_fast_rate = 9600;
3409
3410 if (link.lk_slow_rate < 2400)
3411 link.lk_slow_rate = 2400;
3412
3413 if (link.lk_fast_rate > 10000000)
3414 link.lk_fast_rate = 10000000;
3415
3416 if (link.lk_slow_rate > link.lk_fast_rate)
3417 link.lk_slow_rate = link.lk_fast_rate;
3418
3419 if (link.lk_fast_delay > 2000)
3420 link.lk_fast_delay = 2000;
3421
3422 if (link.lk_slow_delay > 10000)
3423 link.lk_slow_delay = 10000;
3424
3425 if (link.lk_fast_delay < 60)
3426 link.lk_fast_delay = 60;
3427
3428 if (link.lk_slow_delay < link.lk_fast_delay)
3429 link.lk_slow_delay = link.lk_fast_delay;
3430
3431 if (link.lk_header_size < 2)
3432 link.lk_header_size = 2;
3433
3434 if (link.lk_header_size > 128)
3435 link.lk_header_size = 128;
3436
3437 link.lk_fast_rate /= 8 * 1000 / dgrp_poll_tick;
3438 link.lk_slow_rate /= 8 * 1000 / dgrp_poll_tick;
3439
3440 link.lk_fast_delay /= dgrp_poll_tick;
3441 link.lk_slow_delay /= dgrp_poll_tick;
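/*
 * The rates arrive in bits per second and the delays in milliseconds;
 * both are rescaled here to per-poll-tick units.  As an illustrative
 * example, assuming a 20 ms poll tick, a 9600 bps fast rate becomes
 * 9600 / (8 * 1000 / 20) = 24 bytes per tick, and a 60 ms fast delay
 * becomes 3 ticks.
 */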
3442
3443 nd->nd_link = link;
3444
3445 break;
3446
3447 case DIGI_GETLINK:
3448 if (size != sizeof(struct link_struct))
3449 return -EINVAL;
3450
3451 if (copy_to_user((void __user *)arg, (void *)(&nd->nd_link),
3452 size))
3453 return -EFAULT;
3454
3455 break;
3456
3457 default:
3458 return -EINVAL;
3459
3460 }
3461
3462 return 0;
3463 }
3464
3465 /**
3466 * dgrp_poll_handler() -- handler for poll timer
3467 *
3468 * As each timer expires, it determines (a) whether the "transmit"
3469 * waiter needs to be woken up, and (b) whether the poller needs to
3470 * be rescheduled.
3471 */
3472 void dgrp_poll_handler(unsigned long arg)
3473 {
3474 struct dgrp_poll_data *poll_data;
3475 struct nd_struct *nd;
3476 struct link_struct *lk;
3477 ulong time;
3478 ulong poll_time;
3479 ulong freq;
3480 ulong lock_flags;
3481
3482 poll_data = (struct dgrp_poll_data *) arg;
3483 freq = 1000 / poll_data->poll_tick;
3484 poll_data->poll_round += 17;
3485
3486 if (poll_data->poll_round >= freq)
3487 poll_data->poll_round -= freq;
3488
3489 /*
3490 * Loop to process all open nodes.
3491 *
3492 * For each node, determine the rate at which it should
3493 * be transmitting data. Then if the node should wake up
3494 * and transmit data now, enable the net receive select
3495 * to get the transmit going.
3496 */
3497
3498 list_for_each_entry(nd, &nd_struct_list, list) {
3499
3500 lk = &nd->nd_link;
3501
3502 /*
3503 * Decrement statistics. These are only for use with
3504 * KME, so don't worry that the operations are done
3505 * unlocked, and so the results are occasionally wrong.
3506 */
3507
3508 nd->nd_read_count -= (nd->nd_read_count +
3509 poll_data->poll_round) / freq;
3510 nd->nd_write_count -= (nd->nd_write_count +
3511 poll_data->poll_round) / freq;
3512 nd->nd_send_count -= (nd->nd_send_count +
3513 poll_data->poll_round) / freq;
3514 nd->nd_tx_byte -= (nd->nd_tx_byte +
3515 poll_data->poll_round) / freq;
3516 nd->nd_rx_byte -= (nd->nd_rx_byte +
3517 poll_data->poll_round) / freq;
3518
3519 /*
3520 * Wake the daemon to transmit data only when there is
3521 * enough byte credit to send data.
3522 *
3523 * The results are approximate because the operations
3524 * are performed unlocked, and we are inspecting
3525 * data asynchronously updated elsewhere. The whole
3526 * thing is just approximation anyway, so that should
3527 * be okay.
3528 */
3529
3530 if (lk->lk_slow_rate >= UIO_MAX) {
3531
3532 nd->nd_delay = 0;
3533 nd->nd_rate = UIO_MAX;
3534
3535 nd->nd_tx_deposit = nd->nd_tx_charge + 3 * UIO_MAX;
3536 nd->nd_tx_credit = 3 * UIO_MAX;
3537
3538 } else {
3539
3540 long rate;
3541 long delay;
3542 long deposit;
3543 long charge;
3544 long size;
3545 long excess;
3546
3547 long seq_in = nd->nd_seq_in;
3548 long seq_out = nd->nd_seq_out;
3549
3550 /*
3551 * If there are no outstanding packets, run at the
3552 * fastest rate.
3553 */
3554
3555 if (seq_in == seq_out) {
3556 delay = 0;
3557 rate = lk->lk_fast_rate;
3558 }
3559
3560 /*
3561 * Otherwise compute the transmit rate based on the
3562 * delay since the oldest packet.
3563 */
3564
3565 else {
3566 /*
3567 * The actual delay is computed as the
3568 * time since the oldest unacknowledged
3569 * packet was sent, minus the time it
3570 * took to send that packet to the server.
3571 */
3572
3573 delay = ((jiffies - nd->nd_seq_time[seq_out])
3574 - (nd->nd_seq_size[seq_out] /
3575 lk->lk_fast_rate));
3576
3577 /*
3578 * If the delay is less than the "fast"
3579 * delay, transmit full speed. If greater
3580 * than the "slow" delay, transmit at the
3581 * "slow" speed. In between, interpolate
3582 * between the fast and slow speeds.
3583 */
3584
3585 rate =
3586 (delay <= lk->lk_fast_delay ?
3587 lk->lk_fast_rate :
3588 delay >= lk->lk_slow_delay ?
3589 lk->lk_slow_rate :
3590 (lk->lk_slow_rate +
3591 (lk->lk_slow_delay - delay) *
3592 (lk->lk_fast_rate - lk->lk_slow_rate) /
3593 (lk->lk_slow_delay - lk->lk_fast_delay)
3594 )
3595 );
3596 }
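/*
 * Worked example with made-up link settings: fast_rate = 1000,
 * slow_rate = 200, fast_delay = 100, slow_delay = 500.  A measured
 * delay of 300 lands halfway between the two delays, so the
 * interpolation gives 200 + (500 - 300) * (1000 - 200) / (500 - 100)
 * = 600, i.e. halfway between the slow and fast rates.
 */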
3597
3598 nd->nd_delay = delay;
3599 nd->nd_rate = rate;
3600
3601 /*
3602 * Increase the transmit credit by depositing the
3603 * current transmit rate.
3604 */
3605
3606 deposit = nd->nd_tx_deposit;
3607 charge = nd->nd_tx_charge;
3608
3609 deposit += rate;
3610
3611 /*
3612 * If the available transmit credit becomes too large,
3613 * reduce the deposit to correct the value.
3614 *
3615 * Too large is the max of:
3616 * 6 times the header size
3617 * 3 times the current transmit rate.
3618 */
3619
3620 size = 2 * nd->nd_link.lk_header_size;
3621
3622 if (size < rate)
3623 size = rate;
3624
3625 size *= 3;
3626
3627 excess = deposit - charge - size;
3628
3629 if (excess > 0)
3630 deposit -= excess;
3631
3632 nd->nd_tx_deposit = deposit;
3633 nd->nd_tx_credit = deposit - charge;
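/*
 * In effect the credit (deposit - charge) is capped at
 * 3 * max(2 * header_size, rate): never more than six header-sizes
 * or three poll ticks' worth of payload, whichever is larger.
 */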
3634
3635 /*
3636 * Wake the transmit task only if the transmit credit
3637 * is at least 3 times the transmit header size.
3638 */
3639
3640 size = 3 * lk->lk_header_size;
3641
3642 if (nd->nd_tx_credit < size)
3643 continue;
3644 }
3645
3646
3647 /*
3648 * Enable the READ select to wake the daemon if there
3649 * is useful work for the dgrp_net_read() routine to perform.
3650 */
3651
3652 if (waitqueue_active(&nd->nd_tx_waitq) &&
3653 (nd->nd_tx_work != 0 ||
3654 (ulong)(jiffies - nd->nd_tx_time) >= IDLE_MAX)) {
3655 nd->nd_tx_ready = 1;
3656
3657 wake_up_interruptible(&nd->nd_tx_waitq);
3658
3659 /* not needed */
3660 /* nd->nd_flag &= ~ND_SELECT; */
3661 }
3662 }
3663
3664
3665 /*
3666 * Schedule ourself back at the nominal wakeup interval.
3667 */
3668 spin_lock_irqsave(&poll_data->poll_lock, lock_flags);
3669
3670 poll_data->node_active_count--;
3671 if (poll_data->node_active_count > 0) {
3672 poll_data->node_active_count++;
3673 poll_time = poll_data->timer.expires +
3674 poll_data->poll_tick * HZ / 1000;
3675
3676 time = poll_time - jiffies;
3677
3678 if (time >= 2 * poll_data->poll_tick)
3679 poll_time = jiffies + dgrp_poll_tick * HZ / 1000;
3680
3681 poll_data->timer.expires = poll_time;
3682 add_timer(&poll_data->timer);
3683 }
3684
3685 spin_unlock_irqrestore(&poll_data->poll_lock, lock_flags);
3686 }