net/decnet/af_decnet.c
1
2 /*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * DECnet Socket Layer Interface
8 *
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
11 *
12 * Changes:
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved
15 * below.
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
19 * Caulfield.
20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
24 * code.
25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
26 * way. Didn't manage it entirely, but it's better.
27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
37 * when required.
38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc.
43 */
44
45
46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
48
49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or
52 any later version.
53
54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details.
58
59 HISTORY:
60
61 Version Kernel Date Author/Comments
62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dec-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com)
65
66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing
68 connections.
69
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk)
72
73 Port to new kernel development version.
74
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
77 _
78 Added support for incoming connections
79 so we can start developing server apps
80 on Linux.
81 -
82 Module Support
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com)
85 _
86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!!
88 -
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com)
91 Removed bugs on flow control
92 Removed bugs on incoming accessdata
93 order
94 -
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
96 dn_recvmsg fixes
97
98 Patrick J. Caulfield
99 dn_bind fixes
100 *******************************************************************************/
101
102 #include <linux/config.h>
103 #include <linux/module.h>
104 #include <linux/errno.h>
105 #include <linux/types.h>
106 #include <linux/slab.h>
107 #include <linux/socket.h>
108 #include <linux/in.h>
109 #include <linux/kernel.h>
110 #include <linux/sched.h>
111 #include <linux/timer.h>
112 #include <linux/string.h>
113 #include <linux/sockios.h>
114 #include <linux/net.h>
115 #include <linux/netdevice.h>
116 #include <linux/inet.h>
117 #include <linux/route.h>
118 #include <linux/netfilter.h>
119 #include <linux/seq_file.h>
120 #include <net/sock.h>
121 #include <net/tcp_states.h>
122 #include <net/flow.h>
123 #include <asm/system.h>
124 #include <asm/ioctls.h>
125 #include <linux/mm.h>
126 #include <linux/interrupt.h>
127 #include <linux/proc_fs.h>
128 #include <linux/stat.h>
129 #include <linux/init.h>
130 #include <linux/poll.h>
131 #include <net/neighbour.h>
132 #include <net/dst.h>
133 #include <net/dn.h>
134 #include <net/dn_nsp.h>
135 #include <net/dn_dev.h>
136 #include <net/dn_route.h>
137 #include <net/dn_fib.h>
138 #include <net/dn_neigh.h>
139
140 struct dn_sock {
141 struct sock sk;
142 struct dn_scp scp;
143 };
144
145 static void dn_keepalive(struct sock *sk);
146
147 #define DN_SK_HASH_SHIFT 8
148 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
149 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
150
151
152 static struct proto_ops dn_proto_ops;
153 static DEFINE_RWLOCK(dn_hash_lock);
154 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
155 static struct hlist_head dn_wild_sk;
156
157 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen, int flags);
158 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
159
160 static struct hlist_head *dn_find_list(struct sock *sk)
161 {
162 struct dn_scp *scp = DN_SK(sk);
163
164 if (scp->addr.sdn_flags & SDF_WILD)
165 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
166
167 return &dn_sk_hash[scp->addrloc & DN_SK_HASH_MASK];
168 }
169
170 /*
171 * Valid ports are those greater than zero and not already in use.
172 */
173 static int check_port(unsigned short port)
174 {
175 struct sock *sk;
176 struct hlist_node *node;
177
178 if (port == 0)
179 return -1;
180
181 sk_for_each(sk, node, &dn_sk_hash[port & DN_SK_HASH_MASK]) {
182 struct dn_scp *scp = DN_SK(sk);
183 if (scp->addrloc == port)
184 return -1;
185 }
186 return 0;
187 }
188
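/*
 * Local port allocation. A static cursor (starting at 0x2000) is
 * advanced, wrapping through the 16 bit space, until check_port()
 * reports a free value or we arrive back where we started. On
 * success the chosen port is written to scp->addrloc and 1 is
 * returned; 0 means the whole port space is in use.
 */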
189 static unsigned short port_alloc(struct sock *sk)
190 {
191 struct dn_scp *scp = DN_SK(sk);
192 static unsigned short port = 0x2000;
193 unsigned short i_port = port;
194
195 while(check_port(++port) != 0) {
196 if (port == i_port)
197 return 0;
198 }
199
200 scp->addrloc = port;
201
202 return 1;
203 }
204
205 /*
206 * Since this is only ever called from user
207 * level, we don't need a write_lock() version
208 * of this.
209 */
210 static int dn_hash_sock(struct sock *sk)
211 {
212 struct dn_scp *scp = DN_SK(sk);
213 struct hlist_head *list;
214 int rv = -EUSERS;
215
216 BUG_ON(sk_hashed(sk));
217
218 write_lock_bh(&dn_hash_lock);
219
220 if (!scp->addrloc && !port_alloc(sk))
221 goto out;
222
223 rv = -EADDRINUSE;
224 if ((list = dn_find_list(sk)) == NULL)
225 goto out;
226
227 sk_add_node(sk, list);
228 rv = 0;
229 out:
230 write_unlock_bh(&dn_hash_lock);
231 return rv;
232 }
233
234 static void dn_unhash_sock(struct sock *sk)
235 {
236 write_lock(&dn_hash_lock);
237 sk_del_node_init(sk);
238 write_unlock(&dn_hash_lock);
239 }
240
241 static void dn_unhash_sock_bh(struct sock *sk)
242 {
243 write_lock_bh(&dn_hash_lock);
244 sk_del_node_init(sk);
245 write_unlock_bh(&dn_hash_lock);
246 }
247
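/*
 * Listening sockets are hashed on the object number when one is set,
 * otherwise on a simple xor/shift hash of the object name (seeded
 * with its length), so that dn_sklist_find_listener() can match an
 * incoming connect request without reference to any local port number.
 */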
248 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
249 {
250 int i;
251 unsigned hash = addr->sdn_objnum;
252
253 if (hash == 0) {
254 hash = addr->sdn_objnamel;
255 for(i = 0; i < dn_ntohs(addr->sdn_objnamel); i++) {
256 hash ^= addr->sdn_objname[i];
257 hash ^= (hash << 3);
258 }
259 }
260
261 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
262 }
263
264 /*
265 * Called to transform a socket from bound (i.e. with a local address)
266 * into a listening socket (doesn't need a local port number) and rehashes
267 * based upon the object name/number.
268 */
269 static void dn_rehash_sock(struct sock *sk)
270 {
271 struct hlist_head *list;
272 struct dn_scp *scp = DN_SK(sk);
273
274 if (scp->addr.sdn_flags & SDF_WILD)
275 return;
276
277 write_lock_bh(&dn_hash_lock);
278 sk_del_node_init(sk);
279 DN_SK(sk)->addrloc = 0;
280 list = listen_hash(&DN_SK(sk)->addr);
281 sk_add_node(sk, list);
282 write_unlock_bh(&dn_hash_lock);
283 }
284
285 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
286 {
287 int len = 2;
288
289 *buf++ = type;
290
291 switch(type) {
292 case 0:
293 *buf++ = sdn->sdn_objnum;
294 break;
295 case 1:
296 *buf++ = 0;
297 *buf++ = dn_ntohs(sdn->sdn_objnamel);
298 memcpy(buf, sdn->sdn_objname, dn_ntohs(sdn->sdn_objnamel));
299 len = 3 + dn_ntohs(sdn->sdn_objnamel);
300 break;
301 case 2:
302 memset(buf, 0, 5);
303 buf += 5;
304 *buf++ = dn_ntohs(sdn->sdn_objnamel);
305 memcpy(buf, sdn->sdn_objname, dn_ntohs(sdn->sdn_objnamel));
306 len = 7 + dn_ntohs(sdn->sdn_objnamel);
307 break;
308 }
309
310 return len;
311 }
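/*
 * The resulting "username" field layouts are, byte by byte:
 *
 *	format 0:  [fmt=0] [objnum]                              (2 bytes)
 *	format 1:  [fmt=1] [0] [namelen] [objname...]            (3 + namelen)
 *	format 2:  [fmt=2] [5 zero bytes] [namelen] [objname...] (7 + namelen)
 *
 * which is exactly what dn_username2sockaddr() below expects to parse
 * for these formats.
 */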
312
313 /*
314 * On reception of usernames, we handle types 1 and 0 for destination
315 * addresses only. Types 2 and 4 are used for source addresses, but the
316 * UIC, GIC are ignored and they are both treated the same way. Type 3
317 * is never used as I've no idea what its purpose might be or what its
318 * format is.
319 */
320 int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
321 {
322 unsigned char type;
323 int size = len;
324 int namel = 12;
325
326 sdn->sdn_objnum = 0;
327 sdn->sdn_objnamel = dn_htons(0);
328 memset(sdn->sdn_objname, 0, DN_MAXOBJL);
329
330 if (len < 2)
331 return -1;
332
333 len -= 2;
334 *fmt = *data++;
335 type = *data++;
336
337 switch(*fmt) {
338 case 0:
339 sdn->sdn_objnum = type;
340 return 2;
341 case 1:
342 namel = 16;
343 break;
344 case 2:
345 len -= 4;
346 data += 4;
347 break;
348 case 4:
349 len -= 8;
350 data += 8;
351 break;
352 default:
353 return -1;
354 }
355
356 len -= 1;
357
358 if (len < 0)
359 return -1;
360
361 sdn->sdn_objnamel = dn_htons(*data++);
362 len -= dn_ntohs(sdn->sdn_objnamel);
363
364 if ((len < 0) || (dn_ntohs(sdn->sdn_objnamel) > namel))
365 return -1;
366
367 memcpy(sdn->sdn_objname, data, dn_ntohs(sdn->sdn_objnamel));
368
369 return size - len;
370 }
371
372 struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
373 {
374 struct hlist_head *list = listen_hash(addr);
375 struct hlist_node *node;
376 struct sock *sk;
377
378 read_lock(&dn_hash_lock);
379 sk_for_each(sk, node, list) {
380 struct dn_scp *scp = DN_SK(sk);
381 if (sk->sk_state != TCP_LISTEN)
382 continue;
383 if (scp->addr.sdn_objnum) {
384 if (scp->addr.sdn_objnum != addr->sdn_objnum)
385 continue;
386 } else {
387 if (addr->sdn_objnum)
388 continue;
389 if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
390 continue;
391 if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, dn_ntohs(addr->sdn_objnamel)) != 0)
392 continue;
393 }
394 sock_hold(sk);
395 read_unlock(&dn_hash_lock);
396 return sk;
397 }
398
399 sk = sk_head(&dn_wild_sk);
400 if (sk) {
401 if (sk->sk_state == TCP_LISTEN)
402 sock_hold(sk);
403 else
404 sk = NULL;
405 }
406
407 read_unlock(&dn_hash_lock);
408 return sk;
409 }
410
411 struct sock *dn_find_by_skb(struct sk_buff *skb)
412 {
413 struct dn_skb_cb *cb = DN_SKB_CB(skb);
414 struct sock *sk;
415 struct hlist_node *node;
416 struct dn_scp *scp;
417
418 read_lock(&dn_hash_lock);
419 sk_for_each(sk, node, &dn_sk_hash[cb->dst_port & DN_SK_HASH_MASK]) {
420 scp = DN_SK(sk);
421 if (cb->src != dn_saddr2dn(&scp->peer))
422 continue;
423 if (cb->dst_port != scp->addrloc)
424 continue;
425 if (scp->addrrem && (cb->src_port != scp->addrrem))
426 continue;
427 sock_hold(sk);
428 goto found;
429 }
430 sk = NULL;
431 found:
432 read_unlock(&dn_hash_lock);
433 return sk;
434 }
435
436
437
438 static void dn_destruct(struct sock *sk)
439 {
440 struct dn_scp *scp = DN_SK(sk);
441
442 skb_queue_purge(&scp->data_xmit_queue);
443 skb_queue_purge(&scp->other_xmit_queue);
444 skb_queue_purge(&scp->other_receive_queue);
445
446 dst_release(xchg(&sk->sk_dst_cache, NULL));
447 }
448
449 static struct proto dn_proto = {
450 .name = "DECNET",
451 .owner = THIS_MODULE,
452 .obj_size = sizeof(struct dn_sock),
453 };
454
455 static struct sock *dn_alloc_sock(struct socket *sock,
456 unsigned int __nocast gfp)
457 {
458 struct dn_scp *scp;
459 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1);
460
461 if (!sk)
462 goto out;
463
464 if (sock)
465 sock->ops = &dn_proto_ops;
466 sock_init_data(sock, sk);
467
468 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
469 sk->sk_destruct = dn_destruct;
470 sk->sk_no_check = 1;
471 sk->sk_family = PF_DECnet;
472 sk->sk_protocol = 0;
473 sk->sk_allocation = gfp;
474
475 /* Initialization of DECnet Session Control Port */
476 scp = DN_SK(sk);
477 scp->state = DN_O; /* Open */
478 scp->numdat = 1; /* Next data seg to tx */
479 scp->numoth = 1; /* Next oth data to tx */
480 scp->ackxmt_dat = 0; /* Last data seg ack'ed */
481 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
482 scp->ackrcv_dat = 0; /* Highest data ack recv*/
483 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
484 scp->flowrem_sw = DN_SEND;
485 scp->flowloc_sw = DN_SEND;
486 scp->flowrem_dat = 0;
487 scp->flowrem_oth = 1;
488 scp->flowloc_dat = 0;
489 scp->flowloc_oth = 1;
490 scp->services_rem = 0;
491 scp->services_loc = 1 | NSP_FC_NONE;
492 scp->info_rem = 0;
493 scp->info_loc = 0x03; /* NSP version 4.1 */
494 scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
495 scp->nonagle = 0;
496 scp->multi_ireq = 1;
497 scp->accept_mode = ACC_IMMED;
498 scp->addr.sdn_family = AF_DECnet;
499 scp->peer.sdn_family = AF_DECnet;
500 scp->accessdata.acc_accl = 5;
501 memcpy(scp->accessdata.acc_acc, "LINUX", 5);
502
503 scp->max_window = NSP_MAX_WINDOW;
504 scp->snd_window = NSP_MIN_WINDOW;
505 scp->nsp_srtt = NSP_INITIAL_SRTT;
506 scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
507 scp->nsp_rxtshift = 0;
508
509 skb_queue_head_init(&scp->data_xmit_queue);
510 skb_queue_head_init(&scp->other_xmit_queue);
511 skb_queue_head_init(&scp->other_receive_queue);
512
513 scp->persist = 0;
514 scp->persist_fxn = NULL;
515 scp->keepalive = 10 * HZ;
516 scp->keepalive_fxn = dn_keepalive;
517
518 init_timer(&scp->delack_timer);
519 scp->delack_pending = 0;
520 scp->delack_fxn = dn_nsp_delayed_ack;
521
522 dn_start_slow_timer(sk);
523 out:
524 return sk;
525 }
526
527 /*
528 * Keepalive timer.
529 * FIXME: Should respond to SO_KEEPALIVE etc.
530 */
531 static void dn_keepalive(struct sock *sk)
532 {
533 struct dn_scp *scp = DN_SK(sk);
534
535 /*
536 * By checking that the other_data transmit queue is empty
537 * we are double checking that we are not sending too
538 * many of these keepalive frames.
539 */
540 if (skb_queue_empty(&scp->other_xmit_queue))
541 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
542 }
543
544
545 /*
546 * Timer for shutdown/destroyed sockets.
547 * When a socket is dead and no packets have been sent for a
548 * certain amount of time, it is removed by this routine. It
549 * also takes care of sending out DI & DC frames at the
550 * correct times.
551 */
552 int dn_destroy_timer(struct sock *sk)
553 {
554 struct dn_scp *scp = DN_SK(sk);
555
556 scp->persist = dn_nsp_persist(sk);
557
558 switch(scp->state) {
559 case DN_DI:
560 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
561 if (scp->nsp_rxtshift >= decnet_di_count)
562 scp->state = DN_CN;
563 return 0;
564
565 case DN_DR:
566 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
567 if (scp->nsp_rxtshift >= decnet_dr_count)
568 scp->state = DN_DRC;
569 return 0;
570
571 case DN_DN:
572 if (scp->nsp_rxtshift < decnet_dn_count) {
573 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
574 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
575 return 0;
576 }
577 }
578
579 scp->persist = (HZ * decnet_time_wait);
580
581 if (sk->sk_socket)
582 return 0;
583
584 if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
585 dn_unhash_sock(sk);
586 sock_put(sk);
587 return 1;
588 }
589
590 return 0;
591 }
592
593 static void dn_destroy_sock(struct sock *sk)
594 {
595 struct dn_scp *scp = DN_SK(sk);
596
597 scp->nsp_rxtshift = 0; /* reset back off */
598
599 if (sk->sk_socket) {
600 if (sk->sk_socket->state != SS_UNCONNECTED)
601 sk->sk_socket->state = SS_DISCONNECTING;
602 }
603
604 sk->sk_state = TCP_CLOSE;
605
606 switch(scp->state) {
607 case DN_DN:
608 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
609 sk->sk_allocation);
610 scp->persist_fxn = dn_destroy_timer;
611 scp->persist = dn_nsp_persist(sk);
612 break;
613 case DN_CR:
614 scp->state = DN_DR;
615 goto disc_reject;
616 case DN_RUN:
617 scp->state = DN_DI;
618 case DN_DI:
619 case DN_DR:
620 disc_reject:
621 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
622 case DN_NC:
623 case DN_NR:
624 case DN_RJ:
625 case DN_DIC:
626 case DN_CN:
627 case DN_DRC:
628 case DN_CI:
629 case DN_CD:
630 scp->persist_fxn = dn_destroy_timer;
631 scp->persist = dn_nsp_persist(sk);
632 break;
633 default:
634 printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
635 case DN_O:
636 dn_stop_slow_timer(sk);
637
638 dn_unhash_sock_bh(sk);
639 sock_put(sk);
640
641 break;
642 }
643 }
644
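/*
 * DECnet node addresses are 16 bits: the top 6 bits give the area and
 * the low 10 bits the node within that area. For example, node 1.1 is
 * stored as (1 << 10) | 1 = 0x0401 and node 4.100 as
 * (4 << 10) | 100 = 0x1064.
 */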
645 char *dn_addr2asc(dn_address addr, char *buf)
646 {
647 unsigned short node, area;
648
649 node = addr & 0x03ff;
650 area = addr >> 10;
651 sprintf(buf, "%hd.%hd", area, node);
652
653 return buf;
654 }
655
656
657
658 static int dn_create(struct socket *sock, int protocol)
659 {
660 struct sock *sk;
661
662 switch(sock->type) {
663 case SOCK_SEQPACKET:
664 if (protocol != DNPROTO_NSP)
665 return -EPROTONOSUPPORT;
666 break;
667 case SOCK_STREAM:
668 break;
669 default:
670 return -ESOCKTNOSUPPORT;
671 }
672
673
674 if ((sk = dn_alloc_sock(sock, GFP_KERNEL)) == NULL)
675 return -ENOBUFS;
676
677 sk->sk_protocol = protocol;
678
679 return 0;
680 }
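/*
 * From user space this means a DECnet socket is created with, for
 * example (an illustrative sketch only, error handling omitted):
 *
 *	int s = socket(AF_DECnet, SOCK_SEQPACKET, DNPROTO_NSP);
 *
 * SOCK_STREAM is also accepted (with any protocol value); every other
 * type/protocol combination is rejected as shown above.
 */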
681
682
683 static int
684 dn_release(struct socket *sock)
685 {
686 struct sock *sk = sock->sk;
687
688 if (sk) {
689 sock_orphan(sk);
690 sock_hold(sk);
691 lock_sock(sk);
692 dn_destroy_sock(sk);
693 release_sock(sk);
694 sock_put(sk);
695 }
696
697 return 0;
698 }
699
700 static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
701 {
702 struct sock *sk = sock->sk;
703 struct dn_scp *scp = DN_SK(sk);
704 struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
705 struct net_device *dev;
706 int rv;
707
708 if (addr_len != sizeof(struct sockaddr_dn))
709 return -EINVAL;
710
711 if (saddr->sdn_family != AF_DECnet)
712 return -EINVAL;
713
714 if (dn_ntohs(saddr->sdn_nodeaddrl) && (dn_ntohs(saddr->sdn_nodeaddrl) != 2))
715 return -EINVAL;
716
717 if (dn_ntohs(saddr->sdn_objnamel) > DN_MAXOBJL)
718 return -EINVAL;
719
720 if (saddr->sdn_flags & ~SDF_WILD)
721 return -EINVAL;
722
723 #if 1
724 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
725 (saddr->sdn_flags & SDF_WILD)))
726 return -EACCES;
727 #else
728 /*
729 * Maybe put the default actions in the default security ops for
730 * dn_prot_sock ? Would be nice if the capable call would go there
731 * too.
732 */
733 if (security_dn_prot_sock(saddr) &&
734 !capable(CAP_NET_BIND_SERVICE) ||
735 saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD))
736 return -EACCES;
737 #endif
738
739
740 if (!(saddr->sdn_flags & SDF_WILD)) {
741 if (dn_ntohs(saddr->sdn_nodeaddrl)) {
742 read_lock(&dev_base_lock);
743 for(dev = dev_base; dev; dev = dev->next) {
744 if (!dev->dn_ptr)
745 continue;
746 if (dn_dev_islocal(dev, dn_saddr2dn(saddr)))
747 break;
748 }
749 read_unlock(&dev_base_lock);
750 if (dev == NULL)
751 return -EADDRNOTAVAIL;
752 }
753 }
754
755 rv = -EINVAL;
756 lock_sock(sk);
757 if (sock_flag(sk, SOCK_ZAPPED)) {
758 memcpy(&scp->addr, saddr, addr_len);
759 sock_reset_flag(sk, SOCK_ZAPPED);
760
761 rv = dn_hash_sock(sk);
762 if (rv)
763 sock_set_flag(sk, SOCK_ZAPPED);
764 }
765 release_sock(sk);
766
767 return rv;
768 }
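/*
 * A typical user level bind of a named object looks roughly like the
 * following sketch (the object name "MYOBJ" is made up, and the plain
 * assignment to the little endian sdn_objnamel field assumes a little
 * endian host):
 *
 *	struct sockaddr_dn sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sdn_family   = AF_DECnet;
 *	sa.sdn_objnamel = 5;
 *	memcpy(sa.sdn_objname, "MYOBJ", 5);
 *	bind(s, (struct sockaddr *)&sa, sizeof(sa));
 *
 * Binding a numbered object (sdn_objnum != 0) or a wild card receiver
 * (SDF_WILD) additionally requires CAP_NET_BIND_SERVICE, as checked
 * above.
 */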
769
770
771 static int dn_auto_bind(struct socket *sock)
772 {
773 struct sock *sk = sock->sk;
774 struct dn_scp *scp = DN_SK(sk);
775 int rv;
776
777 sock_reset_flag(sk, SOCK_ZAPPED);
778
779 scp->addr.sdn_flags = 0;
780 scp->addr.sdn_objnum = 0;
781
782 /*
783 * This stuff is to keep compatibility with Eduardo's
784 * patch. I hope I can dispense with it shortly...
785 */
786 if ((scp->accessdata.acc_accl != 0) &&
787 (scp->accessdata.acc_accl <= 12)) {
788
789 scp->addr.sdn_objnamel = dn_htons(scp->accessdata.acc_accl);
790 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, dn_ntohs(scp->addr.sdn_objnamel));
791
792 scp->accessdata.acc_accl = 0;
793 memset(scp->accessdata.acc_acc, 0, 40);
794 }
795 /* End of compatibility stuff */
796
797 scp->addr.sdn_add.a_len = dn_htons(2);
798 rv = dn_dev_bind_default((dn_address *)scp->addr.sdn_add.a_addr);
799 if (rv == 0) {
800 rv = dn_hash_sock(sk);
801 if (rv)
802 sock_set_flag(sk, SOCK_ZAPPED);
803 }
804
805 return rv;
806 }
807
808 static int dn_confirm_accept(struct sock *sk, long *timeo,
809 unsigned int __nocast allocation)
810 {
811 struct dn_scp *scp = DN_SK(sk);
812 DEFINE_WAIT(wait);
813 int err;
814
815 if (scp->state != DN_CR)
816 return -EINVAL;
817
818 scp->state = DN_CC;
819 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
820 dn_send_conn_conf(sk, allocation);
821
822 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
823 for(;;) {
824 release_sock(sk);
825 if (scp->state == DN_CC)
826 *timeo = schedule_timeout(*timeo);
827 lock_sock(sk);
828 err = 0;
829 if (scp->state == DN_RUN)
830 break;
831 err = sock_error(sk);
832 if (err)
833 break;
834 err = sock_intr_errno(*timeo);
835 if (signal_pending(current))
836 break;
837 err = -EAGAIN;
838 if (!*timeo)
839 break;
840 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
841 }
842 finish_wait(sk->sk_sleep, &wait);
843 if (err == 0) {
844 sk->sk_socket->state = SS_CONNECTED;
845 } else if (scp->state != DN_CC) {
846 sk->sk_socket->state = SS_UNCONNECTED;
847 }
848 return err;
849 }
850
851 static int dn_wait_run(struct sock *sk, long *timeo)
852 {
853 struct dn_scp *scp = DN_SK(sk);
854 DEFINE_WAIT(wait);
855 int err = 0;
856
857 if (scp->state == DN_RUN)
858 goto out;
859
860 if (!*timeo)
861 return -EALREADY;
862
863 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
864 for(;;) {
865 release_sock(sk);
866 if (scp->state == DN_CI || scp->state == DN_CC)
867 *timeo = schedule_timeout(*timeo);
868 lock_sock(sk);
869 err = 0;
870 if (scp->state == DN_RUN)
871 break;
872 err = sock_error(sk);
873 if (err)
874 break;
875 err = sock_intr_errno(*timeo);
876 if (signal_pending(current))
877 break;
878 err = -ETIMEDOUT;
879 if (!*timeo)
880 break;
881 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
882 }
883 finish_wait(sk->sk_sleep, &wait);
884 out:
885 if (err == 0) {
886 sk->sk_socket->state = SS_CONNECTED;
887 } else if (scp->state != DN_CI && scp->state != DN_CC) {
888 sk->sk_socket->state = SS_UNCONNECTED;
889 }
890 return err;
891 }
892
893 static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
894 {
895 struct socket *sock = sk->sk_socket;
896 struct dn_scp *scp = DN_SK(sk);
897 int err = -EISCONN;
898 struct flowi fl;
899
900 if (sock->state == SS_CONNECTED)
901 goto out;
902
903 if (sock->state == SS_CONNECTING) {
904 err = 0;
905 if (scp->state == DN_RUN) {
906 sock->state = SS_CONNECTED;
907 goto out;
908 }
909 err = -ECONNREFUSED;
910 if (scp->state != DN_CI && scp->state != DN_CC) {
911 sock->state = SS_UNCONNECTED;
912 goto out;
913 }
914 return dn_wait_run(sk, timeo);
915 }
916
917 err = -EINVAL;
918 if (scp->state != DN_O)
919 goto out;
920
921 if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
922 goto out;
923 if (addr->sdn_family != AF_DECnet)
924 goto out;
925 if (addr->sdn_flags & SDF_WILD)
926 goto out;
927
928 if (sock_flag(sk, SOCK_ZAPPED)) {
929 err = dn_auto_bind(sk->sk_socket);
930 if (err)
931 goto out;
932 }
933
934 memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
935
936 err = -EHOSTUNREACH;
937 memset(&fl, 0, sizeof(fl));
938 fl.oif = sk->sk_bound_dev_if;
939 fl.fld_dst = dn_saddr2dn(&scp->peer);
940 fl.fld_src = dn_saddr2dn(&scp->addr);
941 dn_sk_ports_copy(&fl, scp);
942 fl.proto = DNPROTO_NSP;
943 if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
944 goto out;
945 sk->sk_route_caps = sk->sk_dst_cache->dev->features;
946 sock->state = SS_CONNECTING;
947 scp->state = DN_CI;
948 scp->segsize_loc = dst_metric(sk->sk_dst_cache, RTAX_ADVMSS);
949
950 dn_nsp_send_conninit(sk, NSP_CI);
951 err = -EINPROGRESS;
952 if (*timeo) {
953 err = dn_wait_run(sk, timeo);
954 }
955 out:
956 return err;
957 }
958
959 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
960 {
961 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
962 struct sock *sk = sock->sk;
963 int err;
964 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
965
966 lock_sock(sk);
967 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
968 release_sock(sk);
969
970 return err;
971 }
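/*
 * The corresponding user level connect, again only as a rough sketch
 * (the 1.1 node address and the object name are invented, and a
 * little endian host is assumed as in the bind example):
 *
 *	struct sockaddr_dn rem;
 *
 *	memset(&rem, 0, sizeof(rem));
 *	rem.sdn_family        = AF_DECnet;
 *	rem.sdn_add.a_len     = 2;
 *	rem.sdn_add.a_addr[0] = 0x01;    (1.1 == 0x0401, little endian)
 *	rem.sdn_add.a_addr[1] = 0x04;
 *	rem.sdn_objnamel      = 5;
 *	memcpy(rem.sdn_objname, "MYOBJ", 5);
 *	connect(s, (struct sockaddr *)&rem, sizeof(rem));
 *
 * If the socket is still unbound, __dn_connect() uses dn_auto_bind()
 * above to pick a default local address and port before the connect
 * initiate is sent.
 */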
972
973 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
974 {
975 struct dn_scp *scp = DN_SK(sk);
976
977 switch(scp->state) {
978 case DN_RUN:
979 return 0;
980 case DN_CR:
981 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
982 case DN_CI:
983 case DN_CC:
984 return dn_wait_run(sk, timeo);
985 case DN_O:
986 return __dn_connect(sk, addr, addrlen, timeo, flags);
987 }
988
989 return -EINVAL;
990 }
991
992
993 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
994 {
995 unsigned char *ptr = skb->data;
996
997 acc->acc_userl = *ptr++;
998 memcpy(&acc->acc_user, ptr, acc->acc_userl);
999 ptr += acc->acc_userl;
1000
1001 acc->acc_passl = *ptr++;
1002 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1003 ptr += acc->acc_passl;
1004
1005 acc->acc_accl = *ptr++;
1006 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1007
1008 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
1009
1010 }
1011
1012 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1013 {
1014 unsigned char *ptr = skb->data;
1015
1016 opt->opt_optl = *ptr++;
1017 opt->opt_status = 0;
1018 memcpy(opt->opt_data, ptr, opt->opt_optl);
1019 skb_pull(skb, opt->opt_optl + 1);
1020
1021 }
1022
1023 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1024 {
1025 DEFINE_WAIT(wait);
1026 struct sk_buff *skb = NULL;
1027 int err = 0;
1028
1029 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1030 for(;;) {
1031 release_sock(sk);
1032 skb = skb_dequeue(&sk->sk_receive_queue);
1033 if (skb == NULL) {
1034 *timeo = schedule_timeout(*timeo);
1035 skb = skb_dequeue(&sk->sk_receive_queue);
1036 }
1037 lock_sock(sk);
1038 if (skb != NULL)
1039 break;
1040 err = -EINVAL;
1041 if (sk->sk_state != TCP_LISTEN)
1042 break;
1043 err = sock_intr_errno(*timeo);
1044 if (signal_pending(current))
1045 break;
1046 err = -EAGAIN;
1047 if (!*timeo)
1048 break;
1049 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1050 }
1051 finish_wait(sk->sk_sleep, &wait);
1052
1053 return skb == NULL ? ERR_PTR(err) : skb;
1054 }
1055
1056 static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1057 {
1058 struct sock *sk = sock->sk, *newsk;
1059 struct sk_buff *skb = NULL;
1060 struct dn_skb_cb *cb;
1061 unsigned char menuver;
1062 int err = 0;
1063 unsigned char type;
1064 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1065
1066 lock_sock(sk);
1067
1068 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1069 release_sock(sk);
1070 return -EINVAL;
1071 }
1072
1073 skb = skb_dequeue(&sk->sk_receive_queue);
1074 if (skb == NULL) {
1075 skb = dn_wait_for_connect(sk, &timeo);
1076 if (IS_ERR(skb)) {
1077 release_sock(sk);
1078 return PTR_ERR(skb);
1079 }
1080 }
1081
1082 cb = DN_SKB_CB(skb);
1083 sk->sk_ack_backlog--;
1084 newsk = dn_alloc_sock(newsock, sk->sk_allocation);
1085 if (newsk == NULL) {
1086 release_sock(sk);
1087 kfree_skb(skb);
1088 return -ENOBUFS;
1089 }
1090 release_sock(sk);
1091
1092 dst_release(xchg(&newsk->sk_dst_cache, skb->dst));
1093 skb->dst = NULL;
1094
1095 DN_SK(newsk)->state = DN_CR;
1096 DN_SK(newsk)->addrrem = cb->src_port;
1097 DN_SK(newsk)->services_rem = cb->services;
1098 DN_SK(newsk)->info_rem = cb->info;
1099 DN_SK(newsk)->segsize_rem = cb->segsize;
1100 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1101
1102 if (DN_SK(newsk)->segsize_rem < 230)
1103 DN_SK(newsk)->segsize_rem = 230;
1104
1105 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1106 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1107
1108 newsk->sk_state = TCP_LISTEN;
1109 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1110
1111 /*
1112 * If we are listening on a wild socket, we don't want
1113 * the newly created socket on the wrong hash queue.
1114 */
1115 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1116
1117 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1118 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1119 *(dn_address *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1120 *(dn_address *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1121
1122 menuver = *skb->data;
1123 skb_pull(skb, 1);
1124
1125 if (menuver & DN_MENUVER_ACC)
1126 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1127
1128 if (menuver & DN_MENUVER_USR)
1129 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1130
1131 if (menuver & DN_MENUVER_PRX)
1132 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1133
1134 if (menuver & DN_MENUVER_UIC)
1135 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1136
1137 kfree_skb(skb);
1138
1139 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1140 sizeof(struct optdata_dn));
1141 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1142 sizeof(struct optdata_dn));
1143
1144 lock_sock(newsk);
1145 err = dn_hash_sock(newsk);
1146 if (err == 0) {
1147 sock_reset_flag(newsk, SOCK_ZAPPED);
1148 dn_send_conn_ack(newsk);
1149
1150 /*
1151 * Here we use sk->sk_allocation since, although the conn conf is
1152 * for the newsk, the context is that of the old socket.
1153 */
1154 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1155 err = dn_confirm_accept(newsk, &timeo,
1156 sk->sk_allocation);
1157 }
1158 release_sock(newsk);
1159 return err;
1160 }
1161
1162
1163 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer)
1164 {
1165 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1166 struct sock *sk = sock->sk;
1167 struct dn_scp *scp = DN_SK(sk);
1168
1169 *uaddr_len = sizeof(struct sockaddr_dn);
1170
1171 lock_sock(sk);
1172
1173 if (peer) {
1174 if ((sock->state != SS_CONNECTED &&
1175 sock->state != SS_CONNECTING) &&
1176 scp->accept_mode == ACC_IMMED) {
1177 release_sock(sk);
1178 return -ENOTCONN;
1179 }
1178
1179 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1180 } else {
1181 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1182 }
1183
1184 release_sock(sk);
1185
1186 return 0;
1187 }
1188
1189
1190 static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1191 {
1192 struct sock *sk = sock->sk;
1193 struct dn_scp *scp = DN_SK(sk);
1194 int mask = datagram_poll(file, sock, wait);
1195
1196 if (!skb_queue_empty(&scp->other_receive_queue))
1197 mask |= POLLRDBAND;
1198
1199 return mask;
1200 }
1201
1202 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1203 {
1204 struct sock *sk = sock->sk;
1205 struct dn_scp *scp = DN_SK(sk);
1206 int err = -EOPNOTSUPP;
1207 long amount = 0;
1208 struct sk_buff *skb;
1209 int val;
1210
1211 switch(cmd)
1212 {
1213 case SIOCGIFADDR:
1214 case SIOCSIFADDR:
1215 return dn_dev_ioctl(cmd, (void __user *)arg);
1216
1217 case SIOCATMARK:
1218 lock_sock(sk);
1219 val = !skb_queue_empty(&scp->other_receive_queue);
1220 if (scp->state != DN_RUN)
1221 val = -ENOTCONN;
1222 release_sock(sk);
1223 return val;
1224
1225 case TIOCOUTQ:
1226 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1227 if (amount < 0)
1228 amount = 0;
1229 err = put_user(amount, (int __user *)arg);
1230 break;
1231
1232 case TIOCINQ:
1233 lock_sock(sk);
1234 if ((skb = skb_peek(&scp->other_receive_queue)) != NULL) {
1235 amount = skb->len;
1236 } else {
1237 struct sk_buff *skb = sk->sk_receive_queue.next;
1238 for(;;) {
1239 if (skb ==
1240 (struct sk_buff *)&sk->sk_receive_queue)
1241 break;
1242 amount += skb->len;
1243 skb = skb->next;
1244 }
1245 }
1246 release_sock(sk);
1247 err = put_user(amount, (int __user *)arg);
1248 break;
1249
1250 default:
1251 err = dev_ioctl(cmd, (void __user *)arg);
1252 break;
1253 }
1254
1255 return err;
1256 }
1257
1258 static int dn_listen(struct socket *sock, int backlog)
1259 {
1260 struct sock *sk = sock->sk;
1261 int err = -EINVAL;
1262
1263 lock_sock(sk);
1264
1265 if (sock_flag(sk, SOCK_ZAPPED))
1266 goto out;
1267
1268 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1269 goto out;
1270
1271 sk->sk_max_ack_backlog = backlog;
1272 sk->sk_ack_backlog = 0;
1273 sk->sk_state = TCP_LISTEN;
1274 err = 0;
1275 dn_rehash_sock(sk);
1276
1277 out:
1278 release_sock(sk);
1279
1280 return err;
1281 }
1282
1283
1284 static int dn_shutdown(struct socket *sock, int how)
1285 {
1286 struct sock *sk = sock->sk;
1287 struct dn_scp *scp = DN_SK(sk);
1288 int err = -ENOTCONN;
1289
1290 lock_sock(sk);
1291
1292 if (sock->state == SS_UNCONNECTED)
1293 goto out;
1294
1295 err = 0;
1296 if (sock->state == SS_DISCONNECTING)
1297 goto out;
1298
1299 err = -EINVAL;
1300 if (scp->state == DN_O)
1301 goto out;
1302
1303 if (how != SHUTDOWN_MASK)
1304 goto out;
1305
1306 sk->sk_shutdown = how;
1307 dn_destroy_sock(sk);
1308 err = 0;
1309
1310 out:
1311 release_sock(sk);
1312
1313 return err;
1314 }
1315
1316 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1317 {
1318 struct sock *sk = sock->sk;
1319 int err;
1320
1321 lock_sock(sk);
1322 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1323 release_sock(sk);
1324
1325 return err;
1326 }
1327
1328 static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags)
1329 {
1330 struct sock *sk = sock->sk;
1331 struct dn_scp *scp = DN_SK(sk);
1332 long timeo;
1333 union {
1334 struct optdata_dn opt;
1335 struct accessdata_dn acc;
1336 int mode;
1337 unsigned long win;
1338 int val;
1339 unsigned char services;
1340 unsigned char info;
1341 } u;
1342 int err;
1343
1344 if (optlen && !optval)
1345 return -EINVAL;
1346
1347 if (optlen > sizeof(u))
1348 return -EINVAL;
1349
1350 if (copy_from_user(&u, optval, optlen))
1351 return -EFAULT;
1352
1353 switch(optname) {
1354 case DSO_CONDATA:
1355 if (sock->state == SS_CONNECTED)
1356 return -EISCONN;
1357 if ((scp->state != DN_O) && (scp->state != DN_CR))
1358 return -EINVAL;
1359
1360 if (optlen != sizeof(struct optdata_dn))
1361 return -EINVAL;
1362
1363 if (u.opt.opt_optl > 16)
1364 return -EINVAL;
1365
1366 memcpy(&scp->conndata_out, &u.opt, optlen);
1367 break;
1368
1369 case DSO_DISDATA:
1370 if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
1371 return -ENOTCONN;
1372
1373 if (optlen != sizeof(struct optdata_dn))
1374 return -EINVAL;
1375
1376 if (u.opt.opt_optl > 16)
1377 return -EINVAL;
1378
1379 memcpy(&scp->discdata_out, &u.opt, optlen);
1380 break;
1381
1382 case DSO_CONACCESS:
1383 if (sock->state == SS_CONNECTED)
1384 return -EISCONN;
1385 if (scp->state != DN_O)
1386 return -EINVAL;
1387
1388 if (optlen != sizeof(struct accessdata_dn))
1389 return -EINVAL;
1390
1391 if ((u.acc.acc_accl > DN_MAXACCL) ||
1392 (u.acc.acc_passl > DN_MAXACCL) ||
1393 (u.acc.acc_userl > DN_MAXACCL))
1394 return -EINVAL;
1395
1396 memcpy(&scp->accessdata, &u.acc, optlen);
1397 break;
1398
1399 case DSO_ACCEPTMODE:
1400 if (sock->state == SS_CONNECTED)
1401 return -EISCONN;
1402 if (scp->state != DN_O)
1403 return -EINVAL;
1404
1405 if (optlen != sizeof(int))
1406 return -EINVAL;
1407
1408 if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
1409 return -EINVAL;
1410
1411 scp->accept_mode = (unsigned char)u.mode;
1412 break;
1413
1414 case DSO_CONACCEPT:
1415
1416 if (scp->state != DN_CR)
1417 return -EINVAL;
1418 timeo = sock_rcvtimeo(sk, 0);
1419 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
1420 return err;
1421
1422 case DSO_CONREJECT:
1423
1424 if (scp->state != DN_CR)
1425 return -EINVAL;
1426
1427 scp->state = DN_DR;
1428 sk->sk_shutdown = SHUTDOWN_MASK;
1429 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1430 break;
1431
1432 default:
1433 #ifdef CONFIG_NETFILTER
1434 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1435 #endif
1436 case DSO_LINKINFO:
1437 case DSO_STREAM:
1438 case DSO_SEQPACKET:
1439 return -ENOPROTOOPT;
1440
1441 case DSO_MAXWINDOW:
1442 if (optlen != sizeof(unsigned long))
1443 return -EINVAL;
1444 if (u.win > NSP_MAX_WINDOW)
1445 u.win = NSP_MAX_WINDOW;
1446 if (u.win == 0)
1447 return -EINVAL;
1448 scp->max_window = u.win;
1449 if (scp->snd_window > u.win)
1450 scp->snd_window = u.win;
1451 break;
1452
1453 case DSO_NODELAY:
1454 if (optlen != sizeof(int))
1455 return -EINVAL;
1456 if (scp->nonagle == 2)
1457 return -EINVAL;
1458 scp->nonagle = (u.val == 0) ? 0 : 1;
1459 /* if (scp->nonagle == 1) { Push pending frames } */
1460 break;
1461
1462 case DSO_CORK:
1463 if (optlen != sizeof(int))
1464 return -EINVAL;
1465 if (scp->nonagle == 1)
1466 return -EINVAL;
1467 scp->nonagle = (u.val == 0) ? 0 : 2;
1468 /* if (scp->nonagle == 0) { Push pending frames } */
1469 break;
1470
1471 case DSO_SERVICES:
1472 if (optlen != sizeof(unsigned char))
1473 return -EINVAL;
1474 if ((u.services & ~NSP_FC_MASK) != 0x01)
1475 return -EINVAL;
1476 if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
1477 return -EINVAL;
1478 scp->services_loc = u.services;
1479 break;
1480
1481 case DSO_INFO:
1482 if (optlen != sizeof(unsigned char))
1483 return -EINVAL;
1484 if (u.info & 0xfc)
1485 return -EINVAL;
1486 scp->info_loc = u.info;
1487 break;
1488 }
1489
1490 return 0;
1491 }
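/*
 * As an example of the options handled above, a server that wants to
 * examine connect (access) data before completing a connection would
 * switch to deferred accept mode, roughly (sketch only):
 *
 *	int mode = ACC_DEFER;
 *
 *	setsockopt(s, SOL_DECNET, DSO_ACCEPTMODE, &mode, sizeof(mode));
 *
 * and later issue DSO_CONACCEPT (or DSO_CONREJECT) on the socket
 * returned by accept() to confirm or reject the pending connection.
 */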
1492
1493 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1494 {
1495 struct sock *sk = sock->sk;
1496 int err;
1497
1498 lock_sock(sk);
1499 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1500 release_sock(sk);
1501
1502 return err;
1503 }
1504
1505 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1506 {
1507 struct sock *sk = sock->sk;
1508 struct dn_scp *scp = DN_SK(sk);
1509 struct linkinfo_dn link;
1510 unsigned int r_len;
1511 void *r_data = NULL;
1512 unsigned int val;
1513
1514 if(get_user(r_len , optlen))
1515 return -EFAULT;
1516
1517 switch(optname) {
1518 case DSO_CONDATA:
1519 if (r_len > sizeof(struct optdata_dn))
1520 r_len = sizeof(struct optdata_dn);
1521 r_data = &scp->conndata_in;
1522 break;
1523
1524 case DSO_DISDATA:
1525 if (r_len > sizeof(struct optdata_dn))
1526 r_len = sizeof(struct optdata_dn);
1527 r_data = &scp->discdata_in;
1528 break;
1529
1530 case DSO_CONACCESS:
1531 if (r_len > sizeof(struct accessdata_dn))
1532 r_len = sizeof(struct accessdata_dn);
1533 r_data = &scp->accessdata;
1534 break;
1535
1536 case DSO_ACCEPTMODE:
1537 if (r_len > sizeof(unsigned char))
1538 r_len = sizeof(unsigned char);
1539 r_data = &scp->accept_mode;
1540 break;
1541
1542 case DSO_LINKINFO:
1543 if (r_len > sizeof(struct linkinfo_dn))
1544 r_len = sizeof(struct linkinfo_dn);
1545
1546 switch(sock->state) {
1547 case SS_CONNECTING:
1548 link.idn_linkstate = LL_CONNECTING;
1549 break;
1550 case SS_DISCONNECTING:
1551 link.idn_linkstate = LL_DISCONNECTING;
1552 break;
1553 case SS_CONNECTED:
1554 link.idn_linkstate = LL_RUNNING;
1555 break;
1556 default:
1557 link.idn_linkstate = LL_INACTIVE;
1558 }
1559
1560 link.idn_segsize = scp->segsize_rem;
1561 r_data = &link;
1562 break;
1563
1564 default:
1565 #ifdef CONFIG_NETFILTER
1566 {
1567 int val, len;
1568
1569 if(get_user(len, optlen))
1570 return -EFAULT;
1571
1572 val = nf_getsockopt(sk, PF_DECnet, optname,
1573 optval, &len);
1574 if (val >= 0)
1575 val = put_user(len, optlen);
1576 return val;
1577 }
1578 #endif
1579 case DSO_STREAM:
1580 case DSO_SEQPACKET:
1581 case DSO_CONACCEPT:
1582 case DSO_CONREJECT:
1583 return -ENOPROTOOPT;
1584
1585 case DSO_MAXWINDOW:
1586 if (r_len > sizeof(unsigned long))
1587 r_len = sizeof(unsigned long);
1588 r_data = &scp->max_window;
1589 break;
1590
1591 case DSO_NODELAY:
1592 if (r_len > sizeof(int))
1593 r_len = sizeof(int);
1594 val = (scp->nonagle == 1);
1595 r_data = &val;
1596 break;
1597
1598 case DSO_CORK:
1599 if (r_len > sizeof(int))
1600 r_len = sizeof(int);
1601 val = (scp->nonagle == 2);
1602 r_data = &val;
1603 break;
1604
1605 case DSO_SERVICES:
1606 if (r_len > sizeof(unsigned char))
1607 r_len = sizeof(unsigned char);
1608 r_data = &scp->services_rem;
1609 break;
1610
1611 case DSO_INFO:
1612 if (r_len > sizeof(unsigned char))
1613 r_len = sizeof(unsigned char);
1614 r_data = &scp->info_rem;
1615 break;
1616 }
1617
1618 if (r_data) {
1619 if (copy_to_user(optval, r_data, r_len))
1620 return -EFAULT;
1621 if (put_user(r_len, optlen))
1622 return -EFAULT;
1623 }
1624
1625 return 0;
1626 }
1627
1628
1629 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1630 {
1631 struct sk_buff *skb = q->next;
1632 int len = 0;
1633
1634 if (flags & MSG_OOB)
1635 return !skb_queue_empty(q) ? 1 : 0;
1636
1637 while(skb != (struct sk_buff *)q) {
1638 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1639 len += skb->len;
1640
1641 if (cb->nsp_flags & 0x40) {
1642 /* SOCK_SEQPACKET reads to EOM */
1643 if (sk->sk_type == SOCK_SEQPACKET)
1644 return 1;
1645 /* so does SOCK_STREAM unless WAITALL is specified */
1646 if (!(flags & MSG_WAITALL))
1647 return 1;
1648 }
1649
1650 /* minimum data length for read exceeded */
1651 if (len >= target)
1652 return 1;
1653
1654 skb = skb->next;
1655 }
1656
1657 return 0;
1658 }
1659
1660
1661 static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1662 struct msghdr *msg, size_t size, int flags)
1663 {
1664 struct sock *sk = sock->sk;
1665 struct dn_scp *scp = DN_SK(sk);
1666 struct sk_buff_head *queue = &sk->sk_receive_queue;
1667 size_t target = size > 1 ? 1 : 0;
1668 size_t copied = 0;
1669 int rv = 0;
1670 struct sk_buff *skb, *nskb;
1671 struct dn_skb_cb *cb = NULL;
1672 unsigned char eor = 0;
1673 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1674
1675 lock_sock(sk);
1676
1677 if (sock_flag(sk, SOCK_ZAPPED)) {
1678 rv = -EADDRNOTAVAIL;
1679 goto out;
1680 }
1681
1682 rv = dn_check_state(sk, NULL, 0, &timeo, flags);
1683 if (rv)
1684 goto out;
1685
1686 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1687 if (!(flags & MSG_NOSIGNAL))
1688 send_sig(SIGPIPE, current, 0);
1689 rv = -EPIPE;
1690 goto out;
1691 }
1692
1693 if (flags & ~(MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
1694 rv = -EOPNOTSUPP;
1695 goto out;
1696 }
1697
1698 if (flags & MSG_OOB)
1699 queue = &scp->other_receive_queue;
1700
1701 if (flags & MSG_WAITALL)
1702 target = size;
1703
1704
1705 /*
1706 * See if there is data ready to read, sleep if there isn't
1707 */
1708 for(;;) {
1709 if (sk->sk_err)
1710 goto out;
1711
1712 if (!skb_queue_empty(&scp->other_receive_queue)) {
1713 if (!(flags & MSG_OOB)) {
1714 msg->msg_flags |= MSG_OOB;
1715 if (!scp->other_report) {
1716 scp->other_report = 1;
1717 goto out;
1718 }
1719 }
1720 }
1721
1722 if (scp->state != DN_RUN)
1723 goto out;
1724
1725 if (signal_pending(current)) {
1726 rv = sock_intr_errno(timeo);
1727 goto out;
1728 }
1729
1730 if (dn_data_ready(sk, queue, flags, target))
1731 break;
1732
1733 if (flags & MSG_DONTWAIT) {
1734 rv = -EWOULDBLOCK;
1735 goto out;
1736 }
1737
1738 set_bit(SOCK_ASYNC_WAITDATA, &sock->flags);
1739 SOCK_SLEEP_PRE(sk)
1740
1741 if (!dn_data_ready(sk, queue, flags, target))
1742 schedule();
1743
1744 SOCK_SLEEP_POST(sk)
1745 clear_bit(SOCK_ASYNC_WAITDATA, &sock->flags);
1746 }
1747
1748 for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
1749 unsigned int chunk = skb->len;
1750 cb = DN_SKB_CB(skb);
1751
1752 if ((chunk + copied) > size)
1753 chunk = size - copied;
1754
1755 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1756 rv = -EFAULT;
1757 break;
1758 }
1759 copied += chunk;
1760
1761 if (!(flags & MSG_PEEK))
1762 skb_pull(skb, chunk);
1763
1764 eor = cb->nsp_flags & 0x40;
1765 nskb = skb->next;
1766
1767 if (skb->len == 0) {
1768 skb_unlink(skb, queue);
1769 kfree_skb(skb);
1770 /*
1771 * N.B. Don't refer to skb or cb after this point
1772 * in loop.
1773 */
1774 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
1775 scp->flowloc_sw = DN_SEND;
1776 dn_nsp_send_link(sk, DN_SEND, 0);
1777 }
1778 }
1779
1780 if (eor) {
1781 if (sk->sk_type == SOCK_SEQPACKET)
1782 break;
1783 if (!(flags & MSG_WAITALL))
1784 break;
1785 }
1786
1787 if (flags & MSG_OOB)
1788 break;
1789
1790 if (copied >= target)
1791 break;
1792 }
1793
1794 rv = copied;
1795
1796
1797 if (eor && (sk->sk_type == SOCK_SEQPACKET))
1798 msg->msg_flags |= MSG_EOR;
1799
1800 out:
1801 if (rv == 0)
1802 rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
1803
1804 if ((rv >= 0) && msg->msg_name) {
1805 memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
1806 msg->msg_namelen = sizeof(struct sockaddr_dn);
1807 }
1808
1809 release_sock(sk);
1810
1811 return rv;
1812 }
1813
1814
1815 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1816 {
1817 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1818 if (skb_queue_len(queue) >= scp->snd_window)
1819 return 1;
1820 if (fctype != NSP_FC_NONE) {
1821 if (flags & MSG_OOB) {
1822 if (scp->flowrem_oth == 0)
1823 return 1;
1824 } else {
1825 if (scp->flowrem_dat == 0)
1826 return 1;
1827 }
1828 }
1829 return 0;
1830 }
1831
1832 /*
1833 * The DECnet spec requires that the "routing layer" accepts packets which
1834 * are at least 230 bytes in size. This excludes any headers which the NSP
1835 * layer might add, so we always assume that we'll be using the maximal
1836 * length header on data packets. The variation in length is due to the
1837 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1838 * make much practical difference.
1839 */
1840 unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1841 {
1842 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1843 if (dev) {
1844 struct dn_dev *dn_db = dev->dn_ptr;
1845 mtu -= LL_RESERVED_SPACE(dev);
1846 if (dn_db->use_long)
1847 mtu -= 21;
1848 else
1849 mtu -= 6;
1850 mtu -= DN_MAX_NSP_DATA_HEADER;
1851 } else {
1852 /*
1853 * 21 = long header, 16 = guess at MAC header length
1854 */
1855 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1856 }
1857 if (mtu > mss)
1858 mss = mtu;
1859 return mss;
1860 }
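/*
 * As a rough worked example (the exact figures depend on the device):
 * for a 1500 byte Ethernet MTU, with about 16 bytes of link layer
 * reserve, 21 bytes of long format routing header and the maximal NSP
 * data header, the usable segment size comes out at a little over
 * 1450 bytes, well above the 230 byte routing layer minimum discussed
 * above.
 */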
1861
1862 static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1863 {
1864 struct dst_entry *dst = __sk_dst_get(sk);
1865 struct dn_scp *scp = DN_SK(sk);
1866 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1867
1868 /* Other data messages are limited to 16 bytes per packet */
1869 if (flags & MSG_OOB)
1870 return 16;
1871
1872 /* This works out the maximum size of segment we can send out */
1873 if (dst) {
1874 u32 mtu = dst_mtu(dst);
1875 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1876 }
1877
1878 return mss_now;
1879 }
1880
1881 /*
1882 * N.B. We get the timeout wrong here, but then we always did get it
1883 * wrong before and this is another step along the road to correcting
1884 * it. It ought to get updated each time we pass through the routine,
1885 * but in practice it probably doesn't matter too much for now.
1886 */
1887 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1888 unsigned long datalen, int noblock,
1889 int *errcode)
1890 {
1891 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1892 noblock, errcode);
1893 if (skb) {
1894 skb->protocol = __constant_htons(ETH_P_DNA_RT);
1895 skb->pkt_type = PACKET_OUTGOING;
1896 }
1897 return skb;
1898 }
1899
1900 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
1901 struct msghdr *msg, size_t size)
1902 {
1903 struct sock *sk = sock->sk;
1904 struct dn_scp *scp = DN_SK(sk);
1905 size_t mss;
1906 struct sk_buff_head *queue = &scp->data_xmit_queue;
1907 int flags = msg->msg_flags;
1908 int err = 0;
1909 size_t sent = 0;
1910 int addr_len = msg->msg_namelen;
1911 struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name;
1912 struct sk_buff *skb = NULL;
1913 struct dn_skb_cb *cb;
1914 size_t len;
1915 unsigned char fctype;
1916 long timeo;
1917
1918 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1919 return -EOPNOTSUPP;
1920
1921 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1922 return -EINVAL;
1923
1924 lock_sock(sk);
1925 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1926 /*
1927 * The only difference between stream sockets and sequenced packet
1928 * sockets is that the stream sockets always behave as if MSG_EOR
1929 * has been set.
1930 */
1931 if (sock->type == SOCK_STREAM) {
1932 if (flags & MSG_EOR) {
1933 err = -EINVAL;
1934 goto out;
1935 }
1936 flags |= MSG_EOR;
1937 }
1938
1939
1940 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1941 if (err)
1942 goto out_err;
1943
1944 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1945 err = -EPIPE;
1946 goto out_err;
1947 }
1948
1949 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1950 dst_negative_advice(&sk->sk_dst_cache);
1951
1952 mss = scp->segsize_rem;
1953 fctype = scp->services_rem & NSP_FC_MASK;
1954
1955 mss = dn_current_mss(sk, flags);
1956
1957 if (flags & MSG_OOB) {
1958 queue = &scp->other_xmit_queue;
1959 if (size > mss) {
1960 err = -EMSGSIZE;
1961 goto out;
1962 }
1963 }
1964
1965 scp->persist_fxn = dn_nsp_xmit_timeout;
1966
1967 while(sent < size) {
1968 err = sock_error(sk);
1969 if (err)
1970 goto out;
1971
1972 if (signal_pending(current)) {
1973 err = sock_intr_errno(timeo);
1974 goto out;
1975 }
1976
1977 /*
1978 * Calculate size that we wish to send.
1979 */
1980 len = size - sent;
1981
1982 if (len > mss)
1983 len = mss;
1984
1985 /*
1986 * Wait for queue size to go down below the window
1987 * size.
1988 */
1989 if (dn_queue_too_long(scp, queue, flags)) {
1990 if (flags & MSG_DONTWAIT) {
1991 err = -EWOULDBLOCK;
1992 goto out;
1993 }
1994
1995 SOCK_SLEEP_PRE(sk)
1996
1997 if (dn_queue_too_long(scp, queue, flags))
1998 schedule();
1999
2000 SOCK_SLEEP_POST(sk)
2001
2002 continue;
2003 }
2004
2005 /*
2006 * Get a suitably sized skb.
2007 * 64 is a bit of a hack really, but it's larger than any
2008 * link-layer header and has served us well as a good
2009 * guess as to their real length.
2010 */
2011 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2012 flags & MSG_DONTWAIT, &err);
2013
2014 if (err)
2015 break;
2016
2017 if (!skb)
2018 continue;
2019
2020 cb = DN_SKB_CB(skb);
2021
2022 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2023
2024 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2025 err = -EFAULT;
2026 goto out;
2027 }
2028
2029 if (flags & MSG_OOB) {
2030 cb->nsp_flags = 0x30;
2031 if (fctype != NSP_FC_NONE)
2032 scp->flowrem_oth--;
2033 } else {
2034 cb->nsp_flags = 0x00;
2035 if (scp->seg_total == 0)
2036 cb->nsp_flags |= 0x20;
2037
2038 scp->seg_total += len;
2039
2040 if (((sent + len) == size) && (flags & MSG_EOR)) {
2041 cb->nsp_flags |= 0x40;
2042 scp->seg_total = 0;
2043 if (fctype == NSP_FC_SCMC)
2044 scp->flowrem_dat--;
2045 }
2046 if (fctype == NSP_FC_SRC)
2047 scp->flowrem_dat--;
2048 }
2049
2050 sent += len;
2051 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2052 skb = NULL;
2053
2054 scp->persist = dn_nsp_persist(sk);
2055
2056 }
2057 out:
2058
2059 if (skb)
2060 kfree_skb(skb);
2061
2062 release_sock(sk);
2063
2064 return sent ? sent : err;
2065
2066 out_err:
2067 err = sk_stream_error(sk, flags, err);
2068 release_sock(sk);
2069 return err;
2070 }
2071
2072 static int dn_device_event(struct notifier_block *this, unsigned long event,
2073 void *ptr)
2074 {
2075 struct net_device *dev = (struct net_device *)ptr;
2076
2077 switch(event) {
2078 case NETDEV_UP:
2079 dn_dev_up(dev);
2080 break;
2081 case NETDEV_DOWN:
2082 dn_dev_down(dev);
2083 break;
2084 default:
2085 break;
2086 }
2087
2088 return NOTIFY_DONE;
2089 }
2090
2091 static struct notifier_block dn_dev_notifier = {
2092 .notifier_call = dn_device_event,
2093 };
2094
2095 extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2096
2097 static struct packet_type dn_dix_packet_type = {
2098 .type = __constant_htons(ETH_P_DNA_RT),
2099 .dev = NULL, /* All devices */
2100 .func = dn_route_rcv,
2101 };
2102
2103 #ifdef CONFIG_PROC_FS
2104 struct dn_iter_state {
2105 int bucket;
2106 };
2107
2108 static struct sock *dn_socket_get_first(struct seq_file *seq)
2109 {
2110 struct dn_iter_state *state = seq->private;
2111 struct sock *n = NULL;
2112
2113 for(state->bucket = 0;
2114 state->bucket < DN_SK_HASH_SIZE;
2115 ++state->bucket) {
2116 n = sk_head(&dn_sk_hash[state->bucket]);
2117 if (n)
2118 break;
2119 }
2120
2121 return n;
2122 }
2123
2124 static struct sock *dn_socket_get_next(struct seq_file *seq,
2125 struct sock *n)
2126 {
2127 struct dn_iter_state *state = seq->private;
2128
2129 n = sk_next(n);
2130 try_again:
2131 if (n)
2132 goto out;
2133 if (++state->bucket >= DN_SK_HASH_SIZE)
2134 goto out;
2135 n = sk_head(&dn_sk_hash[state->bucket]);
2136 goto try_again;
2137 out:
2138 return n;
2139 }
2140
2141 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2142 {
2143 struct sock *sk = dn_socket_get_first(seq);
2144
2145 if (sk) {
2146 while(*pos && (sk = dn_socket_get_next(seq, sk)))
2147 --*pos;
2148 }
2149 return *pos ? NULL : sk;
2150 }
2151
2152 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2153 {
2154 void *rc;
2155 read_lock_bh(&dn_hash_lock);
2156 rc = socket_get_idx(seq, &pos);
2157 if (!rc) {
2158 read_unlock_bh(&dn_hash_lock);
2159 }
2160 return rc;
2161 }
2162
2163 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2164 {
2165 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2166 }
2167
2168 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2169 {
2170 void *rc;
2171
2172 if (v == SEQ_START_TOKEN) {
2173 rc = dn_socket_get_idx(seq, 0);
2174 goto out;
2175 }
2176
2177 rc = dn_socket_get_next(seq, v);
2178 if (rc)
2179 goto out;
2180 read_unlock_bh(&dn_hash_lock);
2181 out:
2182 ++*pos;
2183 return rc;
2184 }
2185
2186 static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2187 {
2188 if (v && v != SEQ_START_TOKEN)
2189 read_unlock_bh(&dn_hash_lock);
2190 }
2191
2192 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2193
2194 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2195 {
2196 int i;
2197
2198 switch (dn_ntohs(dn->sdn_objnamel)) {
2199 case 0:
2200 sprintf(buf, "%d", dn->sdn_objnum);
2201 break;
2202 default:
2203 for (i = 0; i < dn_ntohs(dn->sdn_objnamel); i++) {
2204 buf[i] = dn->sdn_objname[i];
2205 if (IS_NOT_PRINTABLE(buf[i]))
2206 buf[i] = '.';
2207 }
2208 buf[i] = 0;
2209 }
2210 }
2211
2212 static char *dn_state2asc(unsigned char state)
2213 {
2214 switch(state) {
2215 case DN_O:
2216 return "OPEN";
2217 case DN_CR:
2218 return " CR";
2219 case DN_DR:
2220 return " DR";
2221 case DN_DRC:
2222 return " DRC";
2223 case DN_CC:
2224 return " CC";
2225 case DN_CI:
2226 return " CI";
2227 case DN_NR:
2228 return " NR";
2229 case DN_NC:
2230 return " NC";
2231 case DN_CD:
2232 return " CD";
2233 case DN_RJ:
2234 return " RJ";
2235 case DN_RUN:
2236 return " RUN";
2237 case DN_DI:
2238 return " DI";
2239 case DN_DIC:
2240 return " DIC";
2241 case DN_DN:
2242 return " DN";
2243 case DN_CL:
2244 return " CL";
2245 case DN_CN:
2246 return " CN";
2247 }
2248
2249 return "????";
2250 }
2251
2252 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2253 {
2254 struct dn_scp *scp = DN_SK(sk);
2255 char buf1[DN_ASCBUF_LEN];
2256 char buf2[DN_ASCBUF_LEN];
2257 char local_object[DN_MAXOBJL+3];
2258 char remote_object[DN_MAXOBJL+3];
2259
2260 dn_printable_object(&scp->addr, local_object);
2261 dn_printable_object(&scp->peer, remote_object);
2262
2263 seq_printf(seq,
2264 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2265 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2266 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->addr)), buf1),
2267 scp->addrloc,
2268 scp->numdat,
2269 scp->numoth,
2270 scp->ackxmt_dat,
2271 scp->ackxmt_oth,
2272 scp->flowloc_sw,
2273 local_object,
2274 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->peer)), buf2),
2275 scp->addrrem,
2276 scp->numdat_rcv,
2277 scp->numoth_rcv,
2278 scp->ackrcv_dat,
2279 scp->ackrcv_oth,
2280 scp->flowrem_sw,
2281 remote_object,
2282 dn_state2asc(scp->state),
2283 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2284 }
2285
2286 static int dn_socket_seq_show(struct seq_file *seq, void *v)
2287 {
2288 if (v == SEQ_START_TOKEN) {
2289 seq_puts(seq, "Local Remote\n");
2290 } else {
2291 dn_socket_format_entry(seq, v);
2292 }
2293 return 0;
2294 }
2295
2296 static struct seq_operations dn_socket_seq_ops = {
2297 .start = dn_socket_seq_start,
2298 .next = dn_socket_seq_next,
2299 .stop = dn_socket_seq_stop,
2300 .show = dn_socket_seq_show,
2301 };
2302
2303 static int dn_socket_seq_open(struct inode *inode, struct file *file)
2304 {
2305 struct seq_file *seq;
2306 int rc = -ENOMEM;
2307 struct dn_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
2308
2309 if (!s)
2310 goto out;
2311
2312 rc = seq_open(file, &dn_socket_seq_ops);
2313 if (rc)
2314 goto out_kfree;
2315
2316 seq = file->private_data;
2317 seq->private = s;
2318 memset(s, 0, sizeof(*s));
2319 out:
2320 return rc;
2321 out_kfree:
2322 kfree(s);
2323 goto out;
2324 }
2325
2326 static struct file_operations dn_socket_seq_fops = {
2327 .owner = THIS_MODULE,
2328 .open = dn_socket_seq_open,
2329 .read = seq_read,
2330 .llseek = seq_lseek,
2331 .release = seq_release_private,
2332 };
2333 #endif
2334
2335 static struct net_proto_family dn_family_ops = {
2336 .family = AF_DECnet,
2337 .create = dn_create,
2338 .owner = THIS_MODULE,
2339 };
2340
2341 static struct proto_ops dn_proto_ops = {
2342 .family = AF_DECnet,
2343 .owner = THIS_MODULE,
2344 .release = dn_release,
2345 .bind = dn_bind,
2346 .connect = dn_connect,
2347 .socketpair = sock_no_socketpair,
2348 .accept = dn_accept,
2349 .getname = dn_getname,
2350 .poll = dn_poll,
2351 .ioctl = dn_ioctl,
2352 .listen = dn_listen,
2353 .shutdown = dn_shutdown,
2354 .setsockopt = dn_setsockopt,
2355 .getsockopt = dn_getsockopt,
2356 .sendmsg = dn_sendmsg,
2357 .recvmsg = dn_recvmsg,
2358 .mmap = sock_no_mmap,
2359 .sendpage = sock_no_sendpage,
2360 };
2361
2362 void dn_register_sysctl(void);
2363 void dn_unregister_sysctl(void);
2364
2365 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2366 MODULE_AUTHOR("Linux DECnet Project Team");
2367 MODULE_LICENSE("GPL");
2368 MODULE_ALIAS_NETPROTO(PF_DECnet);
2369
2370 static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2371
2372 static int __init decnet_init(void)
2373 {
2374 int rc;
2375
2376 printk(banner);
2377
2378 rc = proto_register(&dn_proto, 1);
2379 if (rc != 0)
2380 goto out;
2381
2382 dn_neigh_init();
2383 dn_dev_init();
2384 dn_route_init();
2385 dn_fib_init();
2386
2387 sock_register(&dn_family_ops);
2388 dev_add_pack(&dn_dix_packet_type);
2389 register_netdevice_notifier(&dn_dev_notifier);
2390
2391 proc_net_fops_create("decnet", S_IRUGO, &dn_socket_seq_fops);
2392 dn_register_sysctl();
2393 out:
2394 return rc;
2395
2396 }
2397 module_init(decnet_init);
2398
2399 /*
2400 * Prevent DECnet module unloading until it's fixed properly.
2401 * Requires an audit of the code to check for memory leaks and
2402 * initialisation problems etc.
2403 */
2404 #if 0
2405 static void __exit decnet_exit(void)
2406 {
2407 sock_unregister(AF_DECnet);
2408 dev_remove_pack(&dn_dix_packet_type);
2409
2410 dn_unregister_sysctl();
2411
2412 unregister_netdevice_notifier(&dn_dev_notifier);
2413
2414 dn_route_cleanup();
2415 dn_dev_cleanup();
2416 dn_neigh_cleanup();
2417 dn_fib_cleanup();
2418
2419 proc_net_remove("decnet");
2420
2421 proto_unregister(&dn_proto);
2422 }
2423 module_exit(decnet_exit);
2424 #endif