net: Abstract default ADVMSS behind an accessor.
net/decnet/af_decnet.c
1
2 /*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * DECnet Socket Layer Interface
8 *
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
11 *
12 * Changes:
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved
15 * below.
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
19 * Caulfield.
20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
24 * code.
25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
26 * way. Didn't manage it entirely, but it's better.
27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
37 * when required.
38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc.
43 */
44
45
46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
48
49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or
52 any later version.
53
54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details.
58
59 HISTORY:
60
61 Version Kernel Date Author/Comments
62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com)
65
66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing
68 connections.
69
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk)
72
73 Port to new kernel development version.
74
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
77 _
78 Added support for incoming connections
79 so we can start developing server apps
80 on Linux.
81 -
82 Module Support
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com)
85 _
86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!!
88 -
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com)
91 Removed bugs on flow control
92 Removed bugs on incoming accessdata
93 order
94 -
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
96 dn_recvmsg fixes
97
98 Patrick J. Caulfield
99 dn_bind fixes
100 *******************************************************************************/
101
102 #include <linux/module.h>
103 #include <linux/errno.h>
104 #include <linux/types.h>
105 #include <linux/slab.h>
106 #include <linux/socket.h>
107 #include <linux/in.h>
108 #include <linux/kernel.h>
109 #include <linux/sched.h>
110 #include <linux/timer.h>
111 #include <linux/string.h>
112 #include <linux/sockios.h>
113 #include <linux/net.h>
114 #include <linux/netdevice.h>
115 #include <linux/inet.h>
116 #include <linux/route.h>
117 #include <linux/netfilter.h>
118 #include <linux/seq_file.h>
119 #include <net/sock.h>
120 #include <net/tcp_states.h>
121 #include <net/flow.h>
122 #include <asm/system.h>
123 #include <asm/ioctls.h>
124 #include <linux/capability.h>
125 #include <linux/mm.h>
126 #include <linux/interrupt.h>
127 #include <linux/proc_fs.h>
128 #include <linux/stat.h>
129 #include <linux/init.h>
130 #include <linux/poll.h>
131 #include <net/net_namespace.h>
132 #include <net/neighbour.h>
133 #include <net/dst.h>
134 #include <net/fib_rules.h>
135 #include <net/dn.h>
136 #include <net/dn_nsp.h>
137 #include <net/dn_dev.h>
138 #include <net/dn_route.h>
139 #include <net/dn_fib.h>
140 #include <net/dn_neigh.h>
141
142 struct dn_sock {
143 struct sock sk;
144 struct dn_scp scp;
145 };
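/*
 * Editor's note: dn_proto's obj_size is sizeof(struct dn_sock), so every
 * DECnet sock is allocated with its session control block (dn_scp)
 * immediately following the struct sock. The DN_SK() accessor used
 * throughout this file (from <net/dn.h>) simply returns a pointer to
 * that embedded dn_scp.
 */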
146
147 static void dn_keepalive(struct sock *sk);
148
149 #define DN_SK_HASH_SHIFT 8
150 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
151 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
152
153
154 static const struct proto_ops dn_proto_ops;
155 static DEFINE_RWLOCK(dn_hash_lock);
156 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157 static struct hlist_head dn_wild_sk;
158 static atomic_long_t decnet_memory_allocated;
159
160 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
162
163 static struct hlist_head *dn_find_list(struct sock *sk)
164 {
165 struct dn_scp *scp = DN_SK(sk);
166
167 if (scp->addr.sdn_flags & SDF_WILD)
168 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
169
170 return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
171 }
172
173 /*
174 * Valid ports are those greater than zero and not already in use.
175 */
176 static int check_port(__le16 port)
177 {
178 struct sock *sk;
179 struct hlist_node *node;
180
181 if (port == 0)
182 return -1;
183
184 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
185 struct dn_scp *scp = DN_SK(sk);
186 if (scp->addrloc == port)
187 return -1;
188 }
189 return 0;
190 }
191
192 static unsigned short port_alloc(struct sock *sk)
193 {
194 struct dn_scp *scp = DN_SK(sk);
195 static unsigned short port = 0x2000;
196 unsigned short i_port = port;
197
198 while(check_port(cpu_to_le16(++port)) != 0) {
199 if (port == i_port)
200 return 0;
201 }
202
203 scp->addrloc = cpu_to_le16(port);
204
205 return 1;
206 }
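/*
 * Worked example (editor's sketch): port_alloc() hands out local port
 * numbers starting just above 0x2000 and wraps around, skipping any value
 * that check_port() finds already hashed. A port such as 0x2001 then
 * selects its bucket with the same mask used by dn_find_list():
 *
 *	le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK  ==  0x2001 & 0xff  ==  0x01
 *
 * so lookups in dn_find_by_skb() only ever scan one of the 256 chains.
 */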
207
208 /*
209 * Since this is only ever called from user
210 * level, we don't need a write_lock() version
211 * of this.
212 */
213 static int dn_hash_sock(struct sock *sk)
214 {
215 struct dn_scp *scp = DN_SK(sk);
216 struct hlist_head *list;
217 int rv = -EUSERS;
218
219 BUG_ON(sk_hashed(sk));
220
221 write_lock_bh(&dn_hash_lock);
222
223 if (!scp->addrloc && !port_alloc(sk))
224 goto out;
225
226 rv = -EADDRINUSE;
227 if ((list = dn_find_list(sk)) == NULL)
228 goto out;
229
230 sk_add_node(sk, list);
231 rv = 0;
232 out:
233 write_unlock_bh(&dn_hash_lock);
234 return rv;
235 }
236
237 static void dn_unhash_sock(struct sock *sk)
238 {
239 write_lock(&dn_hash_lock);
240 sk_del_node_init(sk);
241 write_unlock(&dn_hash_lock);
242 }
243
244 static void dn_unhash_sock_bh(struct sock *sk)
245 {
246 write_lock_bh(&dn_hash_lock);
247 sk_del_node_init(sk);
248 write_unlock_bh(&dn_hash_lock);
249 }
250
251 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
252 {
253 int i;
254 unsigned hash = addr->sdn_objnum;
255
256 if (hash == 0) {
257 hash = addr->sdn_objnamel;
258 for(i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
259 hash ^= addr->sdn_objname[i];
260 hash ^= (hash << 3);
261 }
262 }
263
264 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
265 }
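/*
 * Worked example (editor's sketch, name purely illustrative): for a named
 * object, listen_hash() folds the object name into a bucket index. With
 * sdn_objnum == 0 and a 6 byte name such as "MIRROR", hash starts from the
 * raw sdn_objnamel value and the loop runs once per name byte:
 *
 *	hash ^= objname[i];
 *	hash ^= (hash << 3);
 *	...
 *	bucket = &dn_sk_hash[hash & DN_SK_HASH_MASK];
 *
 * Numbered objects (sdn_objnum != 0) skip the loop and hash on the object
 * number alone.
 */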
266
267 /*
268 * Called to transform a socket from bound (i.e. with a local address)
269 * into a listening socket (doesn't need a local port number) and rehashes
270 * based upon the object name/number.
271 */
272 static void dn_rehash_sock(struct sock *sk)
273 {
274 struct hlist_head *list;
275 struct dn_scp *scp = DN_SK(sk);
276
277 if (scp->addr.sdn_flags & SDF_WILD)
278 return;
279
280 write_lock_bh(&dn_hash_lock);
281 sk_del_node_init(sk);
282 DN_SK(sk)->addrloc = 0;
283 list = listen_hash(&DN_SK(sk)->addr);
284 sk_add_node(sk, list);
285 write_unlock_bh(&dn_hash_lock);
286 }
287
288 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
289 {
290 int len = 2;
291
292 *buf++ = type;
293
294 switch(type) {
295 case 0:
296 *buf++ = sdn->sdn_objnum;
297 break;
298 case 1:
299 *buf++ = 0;
300 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
301 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
302 len = 3 + le16_to_cpu(sdn->sdn_objnamel);
303 break;
304 case 2:
305 memset(buf, 0, 5);
306 buf += 5;
307 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
308 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
309 len = 7 + le16_to_cpu(sdn->sdn_objnamel);
310 break;
311 }
312
313 return len;
314 }
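/*
 * Editor's note: the three formats produced above, laid out on the wire
 * (lengths are exactly what the code writes):
 *
 *	type 0:  [0x00][objnum]                            -> 2 bytes
 *	type 1:  [0x01][0x00][namel][objname ...]          -> 3 + namel bytes
 *	type 2:  [0x02][5 zero bytes][namel][objname ...]  -> 7 + namel bytes
 */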
315
316 /*
317 * On reception of usernames, we handle types 1 and 0 for destination
318 * addresses only. Types 2 and 4 are used for source addresses, but the
319 * UIC, GIC are ignored and they are both treated the same way. Type 3
320 * is never used as I've no idea what its purpose might be or what its
321 * format is.
322 */
323 int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
324 {
325 unsigned char type;
326 int size = len;
327 int namel = 12;
328
329 sdn->sdn_objnum = 0;
330 sdn->sdn_objnamel = cpu_to_le16(0);
331 memset(sdn->sdn_objname, 0, DN_MAXOBJL);
332
333 if (len < 2)
334 return -1;
335
336 len -= 2;
337 *fmt = *data++;
338 type = *data++;
339
340 switch(*fmt) {
341 case 0:
342 sdn->sdn_objnum = type;
343 return 2;
344 case 1:
345 namel = 16;
346 break;
347 case 2:
348 len -= 4;
349 data += 4;
350 break;
351 case 4:
352 len -= 8;
353 data += 8;
354 break;
355 default:
356 return -1;
357 }
358
359 len -= 1;
360
361 if (len < 0)
362 return -1;
363
364 sdn->sdn_objnamel = cpu_to_le16(*data++);
365 len -= le16_to_cpu(sdn->sdn_objnamel);
366
367 if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
368 return -1;
369
370 memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));
371
372 return size - len;
373 }
374
375 struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
376 {
377 struct hlist_head *list = listen_hash(addr);
378 struct hlist_node *node;
379 struct sock *sk;
380
381 read_lock(&dn_hash_lock);
382 sk_for_each(sk, node, list) {
383 struct dn_scp *scp = DN_SK(sk);
384 if (sk->sk_state != TCP_LISTEN)
385 continue;
386 if (scp->addr.sdn_objnum) {
387 if (scp->addr.sdn_objnum != addr->sdn_objnum)
388 continue;
389 } else {
390 if (addr->sdn_objnum)
391 continue;
392 if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
393 continue;
394 if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
395 continue;
396 }
397 sock_hold(sk);
398 read_unlock(&dn_hash_lock);
399 return sk;
400 }
401
402 sk = sk_head(&dn_wild_sk);
403 if (sk) {
404 if (sk->sk_state == TCP_LISTEN)
405 sock_hold(sk);
406 else
407 sk = NULL;
408 }
409
410 read_unlock(&dn_hash_lock);
411 return sk;
412 }
413
414 struct sock *dn_find_by_skb(struct sk_buff *skb)
415 {
416 struct dn_skb_cb *cb = DN_SKB_CB(skb);
417 struct sock *sk;
418 struct hlist_node *node;
419 struct dn_scp *scp;
420
421 read_lock(&dn_hash_lock);
422 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
423 scp = DN_SK(sk);
424 if (cb->src != dn_saddr2dn(&scp->peer))
425 continue;
426 if (cb->dst_port != scp->addrloc)
427 continue;
428 if (scp->addrrem && (cb->src_port != scp->addrrem))
429 continue;
430 sock_hold(sk);
431 goto found;
432 }
433 sk = NULL;
434 found:
435 read_unlock(&dn_hash_lock);
436 return sk;
437 }
438
439
440
441 static void dn_destruct(struct sock *sk)
442 {
443 struct dn_scp *scp = DN_SK(sk);
444
445 skb_queue_purge(&scp->data_xmit_queue);
446 skb_queue_purge(&scp->other_xmit_queue);
447 skb_queue_purge(&scp->other_receive_queue);
448
449 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
450 }
451
452 static int dn_memory_pressure;
453
454 static void dn_enter_memory_pressure(struct sock *sk)
455 {
456 if (!dn_memory_pressure) {
457 dn_memory_pressure = 1;
458 }
459 }
460
461 static struct proto dn_proto = {
462 .name = "NSP",
463 .owner = THIS_MODULE,
464 .enter_memory_pressure = dn_enter_memory_pressure,
465 .memory_pressure = &dn_memory_pressure,
466 .memory_allocated = &decnet_memory_allocated,
467 .sysctl_mem = sysctl_decnet_mem,
468 .sysctl_wmem = sysctl_decnet_wmem,
469 .sysctl_rmem = sysctl_decnet_rmem,
470 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
471 .obj_size = sizeof(struct dn_sock),
472 };
473
474 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
475 {
476 struct dn_scp *scp;
477 struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);
478
479 if (!sk)
480 goto out;
481
482 if (sock)
483 sock->ops = &dn_proto_ops;
484 sock_init_data(sock, sk);
485
486 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
487 sk->sk_destruct = dn_destruct;
488 sk->sk_no_check = 1;
489 sk->sk_family = PF_DECnet;
490 sk->sk_protocol = 0;
491 sk->sk_allocation = gfp;
492 sk->sk_sndbuf = sysctl_decnet_wmem[1];
493 sk->sk_rcvbuf = sysctl_decnet_rmem[1];
494
495 /* Initialization of DECnet Session Control Port */
496 scp = DN_SK(sk);
497 scp->state = DN_O; /* Open */
498 scp->numdat = 1; /* Next data seg to tx */
499 scp->numoth = 1; /* Next oth data to tx */
500 scp->ackxmt_dat = 0; /* Last data seg ack'ed */
501 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
502 scp->ackrcv_dat = 0; /* Highest data ack recv*/
503 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
504 scp->flowrem_sw = DN_SEND;
505 scp->flowloc_sw = DN_SEND;
506 scp->flowrem_dat = 0;
507 scp->flowrem_oth = 1;
508 scp->flowloc_dat = 0;
509 scp->flowloc_oth = 1;
510 scp->services_rem = 0;
511 scp->services_loc = 1 | NSP_FC_NONE;
512 scp->info_rem = 0;
513 scp->info_loc = 0x03; /* NSP version 4.1 */
514 scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
515 scp->nonagle = 0;
516 scp->multi_ireq = 1;
517 scp->accept_mode = ACC_IMMED;
518 scp->addr.sdn_family = AF_DECnet;
519 scp->peer.sdn_family = AF_DECnet;
520 scp->accessdata.acc_accl = 5;
521 memcpy(scp->accessdata.acc_acc, "LINUX", 5);
522
523 scp->max_window = NSP_MAX_WINDOW;
524 scp->snd_window = NSP_MIN_WINDOW;
525 scp->nsp_srtt = NSP_INITIAL_SRTT;
526 scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
527 scp->nsp_rxtshift = 0;
528
529 skb_queue_head_init(&scp->data_xmit_queue);
530 skb_queue_head_init(&scp->other_xmit_queue);
531 skb_queue_head_init(&scp->other_receive_queue);
532
533 scp->persist = 0;
534 scp->persist_fxn = NULL;
535 scp->keepalive = 10 * HZ;
536 scp->keepalive_fxn = dn_keepalive;
537
538 init_timer(&scp->delack_timer);
539 scp->delack_pending = 0;
540 scp->delack_fxn = dn_nsp_delayed_ack;
541
542 dn_start_slow_timer(sk);
543 out:
544 return sk;
545 }
546
547 /*
548 * Keepalive timer.
549 * FIXME: Should respond to SO_KEEPALIVE etc.
550 */
551 static void dn_keepalive(struct sock *sk)
552 {
553 struct dn_scp *scp = DN_SK(sk);
554
555 /*
556 * By checking the other_data transmit queue is empty
557 * we are double checking that we are not sending too
558 * many of these keepalive frames.
559 */
560 if (skb_queue_empty(&scp->other_xmit_queue))
561 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
562 }
563
564
565 /*
566 * Timer for shutdown/destroyed sockets.
567 * When socket is dead & no packets have been sent for a
568 * certain amount of time, they are removed by this
569 * routine. Also takes care of sending out DI & DC
570 * frames at correct times.
571 */
572 int dn_destroy_timer(struct sock *sk)
573 {
574 struct dn_scp *scp = DN_SK(sk);
575
576 scp->persist = dn_nsp_persist(sk);
577
578 switch(scp->state) {
579 case DN_DI:
580 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
581 if (scp->nsp_rxtshift >= decnet_di_count)
582 scp->state = DN_CN;
583 return 0;
584
585 case DN_DR:
586 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
587 if (scp->nsp_rxtshift >= decnet_dr_count)
588 scp->state = DN_DRC;
589 return 0;
590
591 case DN_DN:
592 if (scp->nsp_rxtshift < decnet_dn_count) {
593 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
594 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
595 return 0;
596 }
597 }
598
599 scp->persist = (HZ * decnet_time_wait);
600
601 if (sk->sk_socket)
602 return 0;
603
604 if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
605 dn_unhash_sock(sk);
606 sock_put(sk);
607 return 1;
608 }
609
610 return 0;
611 }
612
613 static void dn_destroy_sock(struct sock *sk)
614 {
615 struct dn_scp *scp = DN_SK(sk);
616
617 scp->nsp_rxtshift = 0; /* reset back off */
618
619 if (sk->sk_socket) {
620 if (sk->sk_socket->state != SS_UNCONNECTED)
621 sk->sk_socket->state = SS_DISCONNECTING;
622 }
623
624 sk->sk_state = TCP_CLOSE;
625
626 switch(scp->state) {
627 case DN_DN:
628 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
629 sk->sk_allocation);
630 scp->persist_fxn = dn_destroy_timer;
631 scp->persist = dn_nsp_persist(sk);
632 break;
633 case DN_CR:
634 scp->state = DN_DR;
635 goto disc_reject;
636 case DN_RUN:
637 scp->state = DN_DI;
638 case DN_DI:
639 case DN_DR:
640 disc_reject:
641 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
642 case DN_NC:
643 case DN_NR:
644 case DN_RJ:
645 case DN_DIC:
646 case DN_CN:
647 case DN_DRC:
648 case DN_CI:
649 case DN_CD:
650 scp->persist_fxn = dn_destroy_timer;
651 scp->persist = dn_nsp_persist(sk);
652 break;
653 default:
654 printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
655 case DN_O:
656 dn_stop_slow_timer(sk);
657
658 dn_unhash_sock_bh(sk);
659 sock_put(sk);
660
661 break;
662 }
663 }
664
665 char *dn_addr2asc(__u16 addr, char *buf)
666 {
667 unsigned short node, area;
668
669 node = addr & 0x03ff;
670 area = addr >> 10;
671 sprintf(buf, "%hd.%hd", area, node);
672
673 return buf;
674 }
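/*
 * Worked example (editor's sketch): a DECnet address packs a 6 bit area
 * and a 10 bit node number into 16 bits, so address 1.2 is
 * (1 << 10) | 2 == 0x0402, and dn_addr2asc(0x0402, buf) yields "1.2".
 */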
675
676
677
678 static int dn_create(struct net *net, struct socket *sock, int protocol,
679 int kern)
680 {
681 struct sock *sk;
682
683 if (!net_eq(net, &init_net))
684 return -EAFNOSUPPORT;
685
686 switch(sock->type) {
687 case SOCK_SEQPACKET:
688 if (protocol != DNPROTO_NSP)
689 return -EPROTONOSUPPORT;
690 break;
691 case SOCK_STREAM:
692 break;
693 default:
694 return -ESOCKTNOSUPPORT;
695 }
696
697
698 if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL)
699 return -ENOBUFS;
700
701 sk->sk_protocol = protocol;
702
703 return 0;
704 }
705
706
707 static int
708 dn_release(struct socket *sock)
709 {
710 struct sock *sk = sock->sk;
711
712 if (sk) {
713 sock_orphan(sk);
714 sock_hold(sk);
715 lock_sock(sk);
716 dn_destroy_sock(sk);
717 release_sock(sk);
718 sock_put(sk);
719 }
720
721 return 0;
722 }
723
724 static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
725 {
726 struct sock *sk = sock->sk;
727 struct dn_scp *scp = DN_SK(sk);
728 struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
729 struct net_device *dev, *ldev;
730 int rv;
731
732 if (addr_len != sizeof(struct sockaddr_dn))
733 return -EINVAL;
734
735 if (saddr->sdn_family != AF_DECnet)
736 return -EINVAL;
737
738 if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
739 return -EINVAL;
740
741 if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
742 return -EINVAL;
743
744 if (saddr->sdn_flags & ~SDF_WILD)
745 return -EINVAL;
746
747 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
748 (saddr->sdn_flags & SDF_WILD)))
749 return -EACCES;
750
751 if (!(saddr->sdn_flags & SDF_WILD)) {
752 if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
753 rcu_read_lock();
754 ldev = NULL;
755 for_each_netdev_rcu(&init_net, dev) {
756 if (!dev->dn_ptr)
757 continue;
758 if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
759 ldev = dev;
760 break;
761 }
762 }
763 rcu_read_unlock();
764 if (ldev == NULL)
765 return -EADDRNOTAVAIL;
766 }
767 }
768
769 rv = -EINVAL;
770 lock_sock(sk);
771 if (sock_flag(sk, SOCK_ZAPPED)) {
772 memcpy(&scp->addr, saddr, addr_len);
773 sock_reset_flag(sk, SOCK_ZAPPED);
774
775 rv = dn_hash_sock(sk);
776 if (rv)
777 sock_set_flag(sk, SOCK_ZAPPED);
778 }
779 release_sock(sk);
780
781 return rv;
782 }
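/*
 * Editor's sketch of the matching user-space call sequence, for
 * illustration only. It assumes the usual userland definitions of
 * sockaddr_dn and DNPROTO_NSP (from <linux/dn.h> or libdnet's
 * <netdnet/dn.h>); it is not part of this file.
 *
 *	struct sockaddr_dn sa = { .sdn_family = AF_DECnet };
 *	int fd = socket(AF_DECnet, SOCK_SEQPACKET, DNPROTO_NSP);
 *
 *	sa.sdn_objnamel = 6;				// bind to a named object
 *	memcpy(sa.sdn_objname, "MYSERV", 6);		// hypothetical object name
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));	// ends up in dn_bind() above
 */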
783
784
785 static int dn_auto_bind(struct socket *sock)
786 {
787 struct sock *sk = sock->sk;
788 struct dn_scp *scp = DN_SK(sk);
789 int rv;
790
791 sock_reset_flag(sk, SOCK_ZAPPED);
792
793 scp->addr.sdn_flags = 0;
794 scp->addr.sdn_objnum = 0;
795
796 /*
797 * This stuff is to keep compatibility with Eduardo's
798 * patch. I hope I can dispense with it shortly...
799 */
800 if ((scp->accessdata.acc_accl != 0) &&
801 (scp->accessdata.acc_accl <= 12)) {
802
803 scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
804 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));
805
806 scp->accessdata.acc_accl = 0;
807 memset(scp->accessdata.acc_acc, 0, 40);
808 }
809 /* End of compatibility stuff */
810
811 scp->addr.sdn_add.a_len = cpu_to_le16(2);
812 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
813 if (rv == 0) {
814 rv = dn_hash_sock(sk);
815 if (rv)
816 sock_set_flag(sk, SOCK_ZAPPED);
817 }
818
819 return rv;
820 }
821
822 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
823 {
824 struct dn_scp *scp = DN_SK(sk);
825 DEFINE_WAIT(wait);
826 int err;
827
828 if (scp->state != DN_CR)
829 return -EINVAL;
830
831 scp->state = DN_CC;
832 scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
833 dn_send_conn_conf(sk, allocation);
834
835 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
836 for(;;) {
837 release_sock(sk);
838 if (scp->state == DN_CC)
839 *timeo = schedule_timeout(*timeo);
840 lock_sock(sk);
841 err = 0;
842 if (scp->state == DN_RUN)
843 break;
844 err = sock_error(sk);
845 if (err)
846 break;
847 err = sock_intr_errno(*timeo);
848 if (signal_pending(current))
849 break;
850 err = -EAGAIN;
851 if (!*timeo)
852 break;
853 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
854 }
855 finish_wait(sk_sleep(sk), &wait);
856 if (err == 0) {
857 sk->sk_socket->state = SS_CONNECTED;
858 } else if (scp->state != DN_CC) {
859 sk->sk_socket->state = SS_UNCONNECTED;
860 }
861 return err;
862 }
863
864 static int dn_wait_run(struct sock *sk, long *timeo)
865 {
866 struct dn_scp *scp = DN_SK(sk);
867 DEFINE_WAIT(wait);
868 int err = 0;
869
870 if (scp->state == DN_RUN)
871 goto out;
872
873 if (!*timeo)
874 return -EALREADY;
875
876 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
877 for(;;) {
878 release_sock(sk);
879 if (scp->state == DN_CI || scp->state == DN_CC)
880 *timeo = schedule_timeout(*timeo);
881 lock_sock(sk);
882 err = 0;
883 if (scp->state == DN_RUN)
884 break;
885 err = sock_error(sk);
886 if (err)
887 break;
888 err = sock_intr_errno(*timeo);
889 if (signal_pending(current))
890 break;
891 err = -ETIMEDOUT;
892 if (!*timeo)
893 break;
894 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
895 }
896 finish_wait(sk_sleep(sk), &wait);
897 out:
898 if (err == 0) {
899 sk->sk_socket->state = SS_CONNECTED;
900 } else if (scp->state != DN_CI && scp->state != DN_CC) {
901 sk->sk_socket->state = SS_UNCONNECTED;
902 }
903 return err;
904 }
905
906 static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
907 {
908 struct socket *sock = sk->sk_socket;
909 struct dn_scp *scp = DN_SK(sk);
910 int err = -EISCONN;
911 struct flowi fl;
912
913 if (sock->state == SS_CONNECTED)
914 goto out;
915
916 if (sock->state == SS_CONNECTING) {
917 err = 0;
918 if (scp->state == DN_RUN) {
919 sock->state = SS_CONNECTED;
920 goto out;
921 }
922 err = -ECONNREFUSED;
923 if (scp->state != DN_CI && scp->state != DN_CC) {
924 sock->state = SS_UNCONNECTED;
925 goto out;
926 }
927 return dn_wait_run(sk, timeo);
928 }
929
930 err = -EINVAL;
931 if (scp->state != DN_O)
932 goto out;
933
934 if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
935 goto out;
936 if (addr->sdn_family != AF_DECnet)
937 goto out;
938 if (addr->sdn_flags & SDF_WILD)
939 goto out;
940
941 if (sock_flag(sk, SOCK_ZAPPED)) {
942 err = dn_auto_bind(sk->sk_socket);
943 if (err)
944 goto out;
945 }
946
947 memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
948
949 err = -EHOSTUNREACH;
950 memset(&fl, 0, sizeof(fl));
951 fl.oif = sk->sk_bound_dev_if;
952 fl.fld_dst = dn_saddr2dn(&scp->peer);
953 fl.fld_src = dn_saddr2dn(&scp->addr);
954 dn_sk_ports_copy(&fl, scp);
955 fl.proto = DNPROTO_NSP;
956 if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
957 goto out;
958 sk->sk_route_caps = sk->sk_dst_cache->dev->features;
959 sock->state = SS_CONNECTING;
960 scp->state = DN_CI;
961 scp->segsize_loc = dst_metric_advmss(sk->sk_dst_cache);
962
963 dn_nsp_send_conninit(sk, NSP_CI);
964 err = -EINPROGRESS;
965 if (*timeo) {
966 err = dn_wait_run(sk, timeo);
967 }
968 out:
969 return err;
970 }
971
972 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
973 {
974 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
975 struct sock *sk = sock->sk;
976 int err;
977 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
978
979 lock_sock(sk);
980 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
981 release_sock(sk);
982
983 return err;
984 }
985
986 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
987 {
988 struct dn_scp *scp = DN_SK(sk);
989
990 switch(scp->state) {
991 case DN_RUN:
992 return 0;
993 case DN_CR:
994 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
995 case DN_CI:
996 case DN_CC:
997 return dn_wait_run(sk, timeo);
998 case DN_O:
999 return __dn_connect(sk, addr, addrlen, timeo, flags);
1000 }
1001
1002 return -EINVAL;
1003 }
1004
1005
1006 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
1007 {
1008 unsigned char *ptr = skb->data;
1009
1010 acc->acc_userl = *ptr++;
1011 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1012 ptr += acc->acc_userl;
1013
1014 acc->acc_passl = *ptr++;
1015 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1016 ptr += acc->acc_passl;
1017
1018 acc->acc_accl = *ptr++;
1019 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1020
1021 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
1022
1023 }
1024
1025 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1026 {
1027 unsigned char *ptr = skb->data;
1028 u16 len = *ptr++; /* yes, it's 8bit on the wire */
1029
1030 BUG_ON(len > 16); /* we've checked the contents earlier */
1031 opt->opt_optl = cpu_to_le16(len);
1032 opt->opt_status = 0;
1033 memcpy(opt->opt_data, ptr, len);
1034 skb_pull(skb, len + 1);
1035 }
1036
1037 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1038 {
1039 DEFINE_WAIT(wait);
1040 struct sk_buff *skb = NULL;
1041 int err = 0;
1042
1043 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1044 for(;;) {
1045 release_sock(sk);
1046 skb = skb_dequeue(&sk->sk_receive_queue);
1047 if (skb == NULL) {
1048 *timeo = schedule_timeout(*timeo);
1049 skb = skb_dequeue(&sk->sk_receive_queue);
1050 }
1051 lock_sock(sk);
1052 if (skb != NULL)
1053 break;
1054 err = -EINVAL;
1055 if (sk->sk_state != TCP_LISTEN)
1056 break;
1057 err = sock_intr_errno(*timeo);
1058 if (signal_pending(current))
1059 break;
1060 err = -EAGAIN;
1061 if (!*timeo)
1062 break;
1063 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1064 }
1065 finish_wait(sk_sleep(sk), &wait);
1066
1067 return skb == NULL ? ERR_PTR(err) : skb;
1068 }
1069
1070 static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1071 {
1072 struct sock *sk = sock->sk, *newsk;
1073 struct sk_buff *skb = NULL;
1074 struct dn_skb_cb *cb;
1075 unsigned char menuver;
1076 int err = 0;
1077 unsigned char type;
1078 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1079 struct dst_entry *dst;
1080
1081 lock_sock(sk);
1082
1083 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1084 release_sock(sk);
1085 return -EINVAL;
1086 }
1087
1088 skb = skb_dequeue(&sk->sk_receive_queue);
1089 if (skb == NULL) {
1090 skb = dn_wait_for_connect(sk, &timeo);
1091 if (IS_ERR(skb)) {
1092 release_sock(sk);
1093 return PTR_ERR(skb);
1094 }
1095 }
1096
1097 cb = DN_SKB_CB(skb);
1098 sk->sk_ack_backlog--;
1099 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation);
1100 if (newsk == NULL) {
1101 release_sock(sk);
1102 kfree_skb(skb);
1103 return -ENOBUFS;
1104 }
1105 release_sock(sk);
1106
1107 dst = skb_dst(skb);
1108 sk_dst_set(newsk, dst);
1109 skb_dst_set(skb, NULL);
1110
1111 DN_SK(newsk)->state = DN_CR;
1112 DN_SK(newsk)->addrrem = cb->src_port;
1113 DN_SK(newsk)->services_rem = cb->services;
1114 DN_SK(newsk)->info_rem = cb->info;
1115 DN_SK(newsk)->segsize_rem = cb->segsize;
1116 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1117
1118 if (DN_SK(newsk)->segsize_rem < 230)
1119 DN_SK(newsk)->segsize_rem = 230;
1120
1121 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1122 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1123
1124 newsk->sk_state = TCP_LISTEN;
1125 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1126
1127 /*
1128 * If we are listening on a wild socket, we don't want
1129 * the newly created socket on the wrong hash queue.
1130 */
1131 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1132
1133 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1134 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1135 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1136 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1137
1138 menuver = *skb->data;
1139 skb_pull(skb, 1);
1140
1141 if (menuver & DN_MENUVER_ACC)
1142 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1143
1144 if (menuver & DN_MENUVER_USR)
1145 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1146
1147 if (menuver & DN_MENUVER_PRX)
1148 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1149
1150 if (menuver & DN_MENUVER_UIC)
1151 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1152
1153 kfree_skb(skb);
1154
1155 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1156 sizeof(struct optdata_dn));
1157 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1158 sizeof(struct optdata_dn));
1159
1160 lock_sock(newsk);
1161 err = dn_hash_sock(newsk);
1162 if (err == 0) {
1163 sock_reset_flag(newsk, SOCK_ZAPPED);
1164 dn_send_conn_ack(newsk);
1165
1166 /*
1167 * Here we use sk->sk_allocation since although the conn conf is
1168 * for the newsk, the context is the old socket.
1169 */
1170 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1171 err = dn_confirm_accept(newsk, &timeo,
1172 sk->sk_allocation);
1173 }
1174 release_sock(newsk);
1175 return err;
1176 }
1177
1178
1179 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer)
1180 {
1181 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1182 struct sock *sk = sock->sk;
1183 struct dn_scp *scp = DN_SK(sk);
1184
1185 *uaddr_len = sizeof(struct sockaddr_dn);
1186
1187 lock_sock(sk);
1188
1189 if (peer) {
1190 if ((sock->state != SS_CONNECTED &&
1191 sock->state != SS_CONNECTING) &&
1192 scp->accept_mode == ACC_IMMED) {
1193 release_sock(sk);
1194 return -ENOTCONN;
1195 }
1196
1197 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1198 } else {
1199 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1200 }
1201
1202 release_sock(sk);
1203
1204 return 0;
1205 }
1206
1207
1208 static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1209 {
1210 struct sock *sk = sock->sk;
1211 struct dn_scp *scp = DN_SK(sk);
1212 int mask = datagram_poll(file, sock, wait);
1213
1214 if (!skb_queue_empty(&scp->other_receive_queue))
1215 mask |= POLLRDBAND;
1216
1217 return mask;
1218 }
1219
1220 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1221 {
1222 struct sock *sk = sock->sk;
1223 struct dn_scp *scp = DN_SK(sk);
1224 int err = -EOPNOTSUPP;
1225 long amount = 0;
1226 struct sk_buff *skb;
1227 int val;
1228
1229 switch(cmd)
1230 {
1231 case SIOCGIFADDR:
1232 case SIOCSIFADDR:
1233 return dn_dev_ioctl(cmd, (void __user *)arg);
1234
1235 case SIOCATMARK:
1236 lock_sock(sk);
1237 val = !skb_queue_empty(&scp->other_receive_queue);
1238 if (scp->state != DN_RUN)
1239 val = -ENOTCONN;
1240 release_sock(sk);
1241 return val;
1242
1243 case TIOCOUTQ:
1244 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1245 if (amount < 0)
1246 amount = 0;
1247 err = put_user(amount, (int __user *)arg);
1248 break;
1249
1250 case TIOCINQ:
1251 lock_sock(sk);
1252 skb = skb_peek(&scp->other_receive_queue);
1253 if (skb) {
1254 amount = skb->len;
1255 } else {
1256 skb_queue_walk(&sk->sk_receive_queue, skb)
1257 amount += skb->len;
1258 }
1259 release_sock(sk);
1260 err = put_user(amount, (int __user *)arg);
1261 break;
1262
1263 default:
1264 err = -ENOIOCTLCMD;
1265 break;
1266 }
1267
1268 return err;
1269 }
1270
1271 static int dn_listen(struct socket *sock, int backlog)
1272 {
1273 struct sock *sk = sock->sk;
1274 int err = -EINVAL;
1275
1276 lock_sock(sk);
1277
1278 if (sock_flag(sk, SOCK_ZAPPED))
1279 goto out;
1280
1281 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1282 goto out;
1283
1284 sk->sk_max_ack_backlog = backlog;
1285 sk->sk_ack_backlog = 0;
1286 sk->sk_state = TCP_LISTEN;
1287 err = 0;
1288 dn_rehash_sock(sk);
1289
1290 out:
1291 release_sock(sk);
1292
1293 return err;
1294 }
1295
1296
1297 static int dn_shutdown(struct socket *sock, int how)
1298 {
1299 struct sock *sk = sock->sk;
1300 struct dn_scp *scp = DN_SK(sk);
1301 int err = -ENOTCONN;
1302
1303 lock_sock(sk);
1304
1305 if (sock->state == SS_UNCONNECTED)
1306 goto out;
1307
1308 err = 0;
1309 if (sock->state == SS_DISCONNECTING)
1310 goto out;
1311
1312 err = -EINVAL;
1313 if (scp->state == DN_O)
1314 goto out;
1315
1316 if (how != SHUTDOWN_MASK)
1317 goto out;
1318
1319 sk->sk_shutdown = how;
1320 dn_destroy_sock(sk);
1321 err = 0;
1322
1323 out:
1324 release_sock(sk);
1325
1326 return err;
1327 }
1328
1329 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1330 {
1331 struct sock *sk = sock->sk;
1332 int err;
1333
1334 lock_sock(sk);
1335 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1336 release_sock(sk);
1337
1338 return err;
1339 }
1340
1341 static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
1342 {
1343 struct sock *sk = sock->sk;
1344 struct dn_scp *scp = DN_SK(sk);
1345 long timeo;
1346 union {
1347 struct optdata_dn opt;
1348 struct accessdata_dn acc;
1349 int mode;
1350 unsigned long win;
1351 int val;
1352 unsigned char services;
1353 unsigned char info;
1354 } u;
1355 int err;
1356
1357 if (optlen && !optval)
1358 return -EINVAL;
1359
1360 if (optlen > sizeof(u))
1361 return -EINVAL;
1362
1363 if (copy_from_user(&u, optval, optlen))
1364 return -EFAULT;
1365
1366 switch(optname) {
1367 case DSO_CONDATA:
1368 if (sock->state == SS_CONNECTED)
1369 return -EISCONN;
1370 if ((scp->state != DN_O) && (scp->state != DN_CR))
1371 return -EINVAL;
1372
1373 if (optlen != sizeof(struct optdata_dn))
1374 return -EINVAL;
1375
1376 if (le16_to_cpu(u.opt.opt_optl) > 16)
1377 return -EINVAL;
1378
1379 memcpy(&scp->conndata_out, &u.opt, optlen);
1380 break;
1381
1382 case DSO_DISDATA:
1383 if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
1384 return -ENOTCONN;
1385
1386 if (optlen != sizeof(struct optdata_dn))
1387 return -EINVAL;
1388
1389 if (le16_to_cpu(u.opt.opt_optl) > 16)
1390 return -EINVAL;
1391
1392 memcpy(&scp->discdata_out, &u.opt, optlen);
1393 break;
1394
1395 case DSO_CONACCESS:
1396 if (sock->state == SS_CONNECTED)
1397 return -EISCONN;
1398 if (scp->state != DN_O)
1399 return -EINVAL;
1400
1401 if (optlen != sizeof(struct accessdata_dn))
1402 return -EINVAL;
1403
1404 if ((u.acc.acc_accl > DN_MAXACCL) ||
1405 (u.acc.acc_passl > DN_MAXACCL) ||
1406 (u.acc.acc_userl > DN_MAXACCL))
1407 return -EINVAL;
1408
1409 memcpy(&scp->accessdata, &u.acc, optlen);
1410 break;
1411
1412 case DSO_ACCEPTMODE:
1413 if (sock->state == SS_CONNECTED)
1414 return -EISCONN;
1415 if (scp->state != DN_O)
1416 return -EINVAL;
1417
1418 if (optlen != sizeof(int))
1419 return -EINVAL;
1420
1421 if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
1422 return -EINVAL;
1423
1424 scp->accept_mode = (unsigned char)u.mode;
1425 break;
1426
1427 case DSO_CONACCEPT:
1428
1429 if (scp->state != DN_CR)
1430 return -EINVAL;
1431 timeo = sock_rcvtimeo(sk, 0);
1432 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
1433 return err;
1434
1435 case DSO_CONREJECT:
1436
1437 if (scp->state != DN_CR)
1438 return -EINVAL;
1439
1440 scp->state = DN_DR;
1441 sk->sk_shutdown = SHUTDOWN_MASK;
1442 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1443 break;
1444
1445 default:
1446 #ifdef CONFIG_NETFILTER
1447 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1448 #endif
1449 case DSO_LINKINFO:
1450 case DSO_STREAM:
1451 case DSO_SEQPACKET:
1452 return -ENOPROTOOPT;
1453
1454 case DSO_MAXWINDOW:
1455 if (optlen != sizeof(unsigned long))
1456 return -EINVAL;
1457 if (u.win > NSP_MAX_WINDOW)
1458 u.win = NSP_MAX_WINDOW;
1459 if (u.win == 0)
1460 return -EINVAL;
1461 scp->max_window = u.win;
1462 if (scp->snd_window > u.win)
1463 scp->snd_window = u.win;
1464 break;
1465
1466 case DSO_NODELAY:
1467 if (optlen != sizeof(int))
1468 return -EINVAL;
1469 if (scp->nonagle == 2)
1470 return -EINVAL;
1471 scp->nonagle = (u.val == 0) ? 0 : 1;
1472 /* if (scp->nonagle == 1) { Push pending frames } */
1473 break;
1474
1475 case DSO_CORK:
1476 if (optlen != sizeof(int))
1477 return -EINVAL;
1478 if (scp->nonagle == 1)
1479 return -EINVAL;
1480 scp->nonagle = (u.val == 0) ? 0 : 2;
1481 /* if (scp->nonagle == 0) { Push pending frames } */
1482 break;
1483
1484 case DSO_SERVICES:
1485 if (optlen != sizeof(unsigned char))
1486 return -EINVAL;
1487 if ((u.services & ~NSP_FC_MASK) != 0x01)
1488 return -EINVAL;
1489 if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
1490 return -EINVAL;
1491 scp->services_loc = u.services;
1492 break;
1493
1494 case DSO_INFO:
1495 if (optlen != sizeof(unsigned char))
1496 return -EINVAL;
1497 if (u.info & 0xfc)
1498 return -EINVAL;
1499 scp->info_loc = u.info;
1500 break;
1501 }
1502
1503 return 0;
1504 }
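/*
 * Editor's sketch of how a listener drives the deferred-accept path above
 * from user space (illustrative only; SOL_DECNET, DSO_ACCEPTMODE,
 * ACC_DEFER and DSO_CONACCEPT are assumed to come from the usual userland
 * headers):
 *
 *	// after socket() + bind() as usual:
 *	int defer = ACC_DEFER;
 *	setsockopt(fd, SOL_DECNET, DSO_ACCEPTMODE, &defer, sizeof(defer));
 *	listen(fd, 5);
 *	newfd = accept(fd, NULL, NULL);		// new socket stays in DN_CR
 *	setsockopt(newfd, SOL_DECNET, DSO_CONACCEPT, NULL, 0);	// confirm
 *	// ... or DSO_CONREJECT to refuse the connection instead
 */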
1505
1506 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1507 {
1508 struct sock *sk = sock->sk;
1509 int err;
1510
1511 lock_sock(sk);
1512 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1513 release_sock(sk);
1514
1515 return err;
1516 }
1517
1518 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1519 {
1520 struct sock *sk = sock->sk;
1521 struct dn_scp *scp = DN_SK(sk);
1522 struct linkinfo_dn link;
1523 unsigned int r_len;
1524 void *r_data = NULL;
1525 unsigned int val;
1526
1527 if(get_user(r_len , optlen))
1528 return -EFAULT;
1529
1530 switch(optname) {
1531 case DSO_CONDATA:
1532 if (r_len > sizeof(struct optdata_dn))
1533 r_len = sizeof(struct optdata_dn);
1534 r_data = &scp->conndata_in;
1535 break;
1536
1537 case DSO_DISDATA:
1538 if (r_len > sizeof(struct optdata_dn))
1539 r_len = sizeof(struct optdata_dn);
1540 r_data = &scp->discdata_in;
1541 break;
1542
1543 case DSO_CONACCESS:
1544 if (r_len > sizeof(struct accessdata_dn))
1545 r_len = sizeof(struct accessdata_dn);
1546 r_data = &scp->accessdata;
1547 break;
1548
1549 case DSO_ACCEPTMODE:
1550 if (r_len > sizeof(unsigned char))
1551 r_len = sizeof(unsigned char);
1552 r_data = &scp->accept_mode;
1553 break;
1554
1555 case DSO_LINKINFO:
1556 if (r_len > sizeof(struct linkinfo_dn))
1557 r_len = sizeof(struct linkinfo_dn);
1558
1559 memset(&link, 0, sizeof(link));
1560
1561 switch(sock->state) {
1562 case SS_CONNECTING:
1563 link.idn_linkstate = LL_CONNECTING;
1564 break;
1565 case SS_DISCONNECTING:
1566 link.idn_linkstate = LL_DISCONNECTING;
1567 break;
1568 case SS_CONNECTED:
1569 link.idn_linkstate = LL_RUNNING;
1570 break;
1571 default:
1572 link.idn_linkstate = LL_INACTIVE;
1573 }
1574
1575 link.idn_segsize = scp->segsize_rem;
1576 r_data = &link;
1577 break;
1578
1579 default:
1580 #ifdef CONFIG_NETFILTER
1581 {
1582 int ret, len;
1583
1584 if(get_user(len, optlen))
1585 return -EFAULT;
1586
1587 ret = nf_getsockopt(sk, PF_DECnet, optname,
1588 optval, &len);
1589 if (ret >= 0)
1590 ret = put_user(len, optlen);
1591 return ret;
1592 }
1593 #endif
1594 case DSO_STREAM:
1595 case DSO_SEQPACKET:
1596 case DSO_CONACCEPT:
1597 case DSO_CONREJECT:
1598 return -ENOPROTOOPT;
1599
1600 case DSO_MAXWINDOW:
1601 if (r_len > sizeof(unsigned long))
1602 r_len = sizeof(unsigned long);
1603 r_data = &scp->max_window;
1604 break;
1605
1606 case DSO_NODELAY:
1607 if (r_len > sizeof(int))
1608 r_len = sizeof(int);
1609 val = (scp->nonagle == 1);
1610 r_data = &val;
1611 break;
1612
1613 case DSO_CORK:
1614 if (r_len > sizeof(int))
1615 r_len = sizeof(int);
1616 val = (scp->nonagle == 2);
1617 r_data = &val;
1618 break;
1619
1620 case DSO_SERVICES:
1621 if (r_len > sizeof(unsigned char))
1622 r_len = sizeof(unsigned char);
1623 r_data = &scp->services_rem;
1624 break;
1625
1626 case DSO_INFO:
1627 if (r_len > sizeof(unsigned char))
1628 r_len = sizeof(unsigned char);
1629 r_data = &scp->info_rem;
1630 break;
1631 }
1632
1633 if (r_data) {
1634 if (copy_to_user(optval, r_data, r_len))
1635 return -EFAULT;
1636 if (put_user(r_len, optlen))
1637 return -EFAULT;
1638 }
1639
1640 return 0;
1641 }
1642
1643
1644 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1645 {
1646 struct sk_buff *skb;
1647 int len = 0;
1648
1649 if (flags & MSG_OOB)
1650 return !skb_queue_empty(q) ? 1 : 0;
1651
1652 skb_queue_walk(q, skb) {
1653 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1654 len += skb->len;
1655
1656 if (cb->nsp_flags & 0x40) {
1657 /* SOCK_SEQPACKET reads to EOM */
1658 if (sk->sk_type == SOCK_SEQPACKET)
1659 return 1;
1660 /* so does SOCK_STREAM unless WAITALL is specified */
1661 if (!(flags & MSG_WAITALL))
1662 return 1;
1663 }
1664
1665 /* minimum data length for read exceeded */
1666 if (len >= target)
1667 return 1;
1668 }
1669
1670 return 0;
1671 }
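/*
 * Editor's note: the 0x40 bit tested above is the NSP end-of-message flag
 * that dn_sendmsg() (later in this file) sets on the last segment of each
 * message, with 0x20 marking the first segment. It is what lets
 * SOCK_SEQPACKET reads stop at message boundaries; SOCK_STREAM treats it
 * the same way unless MSG_WAITALL asks for the full byte count.
 */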
1672
1673
1674 static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1675 struct msghdr *msg, size_t size, int flags)
1676 {
1677 struct sock *sk = sock->sk;
1678 struct dn_scp *scp = DN_SK(sk);
1679 struct sk_buff_head *queue = &sk->sk_receive_queue;
1680 size_t target = size > 1 ? 1 : 0;
1681 size_t copied = 0;
1682 int rv = 0;
1683 struct sk_buff *skb, *n;
1684 struct dn_skb_cb *cb = NULL;
1685 unsigned char eor = 0;
1686 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1687
1688 lock_sock(sk);
1689
1690 if (sock_flag(sk, SOCK_ZAPPED)) {
1691 rv = -EADDRNOTAVAIL;
1692 goto out;
1693 }
1694
1695 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1696 rv = 0;
1697 goto out;
1698 }
1699
1700 rv = dn_check_state(sk, NULL, 0, &timeo, flags);
1701 if (rv)
1702 goto out;
1703
1704 if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
1705 rv = -EOPNOTSUPP;
1706 goto out;
1707 }
1708
1709 if (flags & MSG_OOB)
1710 queue = &scp->other_receive_queue;
1711
1712 if (flags & MSG_WAITALL)
1713 target = size;
1714
1715
1716 /*
1717 * See if there is data ready to read, sleep if there isn't
1718 */
1719 for(;;) {
1720 DEFINE_WAIT(wait);
1721
1722 if (sk->sk_err)
1723 goto out;
1724
1725 if (!skb_queue_empty(&scp->other_receive_queue)) {
1726 if (!(flags & MSG_OOB)) {
1727 msg->msg_flags |= MSG_OOB;
1728 if (!scp->other_report) {
1729 scp->other_report = 1;
1730 goto out;
1731 }
1732 }
1733 }
1734
1735 if (scp->state != DN_RUN)
1736 goto out;
1737
1738 if (signal_pending(current)) {
1739 rv = sock_intr_errno(timeo);
1740 goto out;
1741 }
1742
1743 if (dn_data_ready(sk, queue, flags, target))
1744 break;
1745
1746 if (flags & MSG_DONTWAIT) {
1747 rv = -EWOULDBLOCK;
1748 goto out;
1749 }
1750
1751 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1752 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1753 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
1754 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1755 finish_wait(sk_sleep(sk), &wait);
1756 }
1757
1758 skb_queue_walk_safe(queue, skb, n) {
1759 unsigned int chunk = skb->len;
1760 cb = DN_SKB_CB(skb);
1761
1762 if ((chunk + copied) > size)
1763 chunk = size - copied;
1764
1765 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1766 rv = -EFAULT;
1767 break;
1768 }
1769 copied += chunk;
1770
1771 if (!(flags & MSG_PEEK))
1772 skb_pull(skb, chunk);
1773
1774 eor = cb->nsp_flags & 0x40;
1775
1776 if (skb->len == 0) {
1777 skb_unlink(skb, queue);
1778 kfree_skb(skb);
1779 /*
1780 * N.B. Don't refer to skb or cb after this point
1781 * in loop.
1782 */
1783 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
1784 scp->flowloc_sw = DN_SEND;
1785 dn_nsp_send_link(sk, DN_SEND, 0);
1786 }
1787 }
1788
1789 if (eor) {
1790 if (sk->sk_type == SOCK_SEQPACKET)
1791 break;
1792 if (!(flags & MSG_WAITALL))
1793 break;
1794 }
1795
1796 if (flags & MSG_OOB)
1797 break;
1798
1799 if (copied >= target)
1800 break;
1801 }
1802
1803 rv = copied;
1804
1805
1806 if (eor && (sk->sk_type == SOCK_SEQPACKET))
1807 msg->msg_flags |= MSG_EOR;
1808
1809 out:
1810 if (rv == 0)
1811 rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
1812
1813 if ((rv >= 0) && msg->msg_name) {
1814 memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
1815 msg->msg_namelen = sizeof(struct sockaddr_dn);
1816 }
1817
1818 release_sock(sk);
1819
1820 return rv;
1821 }
1822
1823
1824 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1825 {
1826 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1827 if (skb_queue_len(queue) >= scp->snd_window)
1828 return 1;
1829 if (fctype != NSP_FC_NONE) {
1830 if (flags & MSG_OOB) {
1831 if (scp->flowrem_oth == 0)
1832 return 1;
1833 } else {
1834 if (scp->flowrem_dat == 0)
1835 return 1;
1836 }
1837 }
1838 return 0;
1839 }
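/*
 * Editor's note: the two limits checked above mirror how the counters are
 * maintained elsewhere in this file. snd_window bounds the number of
 * queued-but-unacknowledged skbs, while flowrem_dat/flowrem_oth are the
 * remote flow-control credits: dn_sendmsg() decrements them per segment
 * (NSP_FC_SRC), per complete message (NSP_FC_SCMC) or not at all
 * (NSP_FC_NONE), and they are replenished by the peer's link-service
 * messages in the NSP input path (outside this file).
 */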
1840
1841 /*
1842 * The DECnet spec requires that the "routing layer" accepts packets which
1843 * are at least 230 bytes in size. This excludes any headers which the NSP
1844 * layer might add, so we always assume that we'll be using the maximal
1845 * length header on data packets. The variation in length is due to the
1846 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1847 * make much practical difference.
1848 */
1849 unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1850 {
1851 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1852 if (dev) {
1853 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1854 mtu -= LL_RESERVED_SPACE(dev);
1855 if (dn_db->use_long)
1856 mtu -= 21;
1857 else
1858 mtu -= 6;
1859 mtu -= DN_MAX_NSP_DATA_HEADER;
1860 } else {
1861 /*
1862 * 21 = long header, 16 = guess at MAC header length
1863 */
1864 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1865 }
1866 if (mtu > mss)
1867 mss = mtu;
1868 return mss;
1869 }
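/*
 * Editor's sketch of the arithmetic above for a device using the long
 * data-link header: with an interface MTU of `mtu`,
 *
 *	mss = mtu - LL_RESERVED_SPACE(dev) - 21 - DN_MAX_NSP_DATA_HEADER
 *
 * (or "- 6" instead of "- 21" for the short header). The result is never
 * allowed to fall below the 230 byte routing-layer minimum less the NSP
 * data header described in the comment above.
 */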
1870
1871 static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1872 {
1873 struct dst_entry *dst = __sk_dst_get(sk);
1874 struct dn_scp *scp = DN_SK(sk);
1875 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1876
1877 /* Other data messages are limited to 16 bytes per packet */
1878 if (flags & MSG_OOB)
1879 return 16;
1880
1881 /* This works out the maximum size of segment we can send out */
1882 if (dst) {
1883 u32 mtu = dst_mtu(dst);
1884 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1885 }
1886
1887 return mss_now;
1888 }
1889
1890 /*
1891 * N.B. We get the timeout wrong here, but then we always did get it
1892 * wrong before and this is another step along the road to correcting
1893 * it. It ought to get updated each time we pass through the routine,
1894 * but in practice it probably doesn't matter too much for now.
1895 */
1896 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1897 unsigned long datalen, int noblock,
1898 int *errcode)
1899 {
1900 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1901 noblock, errcode);
1902 if (skb) {
1903 skb->protocol = htons(ETH_P_DNA_RT);
1904 skb->pkt_type = PACKET_OUTGOING;
1905 }
1906 return skb;
1907 }
1908
1909 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
1910 struct msghdr *msg, size_t size)
1911 {
1912 struct sock *sk = sock->sk;
1913 struct dn_scp *scp = DN_SK(sk);
1914 size_t mss;
1915 struct sk_buff_head *queue = &scp->data_xmit_queue;
1916 int flags = msg->msg_flags;
1917 int err = 0;
1918 size_t sent = 0;
1919 int addr_len = msg->msg_namelen;
1920 struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name;
1921 struct sk_buff *skb = NULL;
1922 struct dn_skb_cb *cb;
1923 size_t len;
1924 unsigned char fctype;
1925 long timeo;
1926
1927 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1928 return -EOPNOTSUPP;
1929
1930 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1931 return -EINVAL;
1932
1933 lock_sock(sk);
1934 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1935 /*
1936 * The only difference between stream sockets and sequenced packet
1937 * sockets is that the stream sockets always behave as if MSG_EOR
1938 * has been set.
1939 */
1940 if (sock->type == SOCK_STREAM) {
1941 if (flags & MSG_EOR) {
1942 err = -EINVAL;
1943 goto out;
1944 }
1945 flags |= MSG_EOR;
1946 }
1947
1948
1949 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1950 if (err)
1951 goto out_err;
1952
1953 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1954 err = -EPIPE;
1955 if (!(flags & MSG_NOSIGNAL))
1956 send_sig(SIGPIPE, current, 0);
1957 goto out_err;
1958 }
1959
1960 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1961 dst_negative_advice(sk);
1962
1963 mss = scp->segsize_rem;
1964 fctype = scp->services_rem & NSP_FC_MASK;
1965
1966 mss = dn_current_mss(sk, flags);
1967
1968 if (flags & MSG_OOB) {
1969 queue = &scp->other_xmit_queue;
1970 if (size > mss) {
1971 err = -EMSGSIZE;
1972 goto out;
1973 }
1974 }
1975
1976 scp->persist_fxn = dn_nsp_xmit_timeout;
1977
1978 while(sent < size) {
1979 err = sock_error(sk);
1980 if (err)
1981 goto out;
1982
1983 if (signal_pending(current)) {
1984 err = sock_intr_errno(timeo);
1985 goto out;
1986 }
1987
1988 /*
1989 * Calculate size that we wish to send.
1990 */
1991 len = size - sent;
1992
1993 if (len > mss)
1994 len = mss;
1995
1996 /*
1997 * Wait for queue size to go down below the window
1998 * size.
1999 */
2000 if (dn_queue_too_long(scp, queue, flags)) {
2001 DEFINE_WAIT(wait);
2002
2003 if (flags & MSG_DONTWAIT) {
2004 err = -EWOULDBLOCK;
2005 goto out;
2006 }
2007
2008 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2009 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2010 sk_wait_event(sk, &timeo,
2011 !dn_queue_too_long(scp, queue, flags));
2012 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2013 finish_wait(sk_sleep(sk), &wait);
2014 continue;
2015 }
2016
2017 /*
2018 * Get a suitably sized skb.
2019 * 64 is a bit of a hack really, but it's larger than any
2020 * link-layer headers and has served us well as a good
2021 * guess as to their real length.
2022 */
2023 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2024 flags & MSG_DONTWAIT, &err);
2025
2026 if (err)
2027 break;
2028
2029 if (!skb)
2030 continue;
2031
2032 cb = DN_SKB_CB(skb);
2033
2034 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2035
2036 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2037 err = -EFAULT;
2038 goto out;
2039 }
2040
2041 if (flags & MSG_OOB) {
2042 cb->nsp_flags = 0x30;
2043 if (fctype != NSP_FC_NONE)
2044 scp->flowrem_oth--;
2045 } else {
2046 cb->nsp_flags = 0x00;
2047 if (scp->seg_total == 0)
2048 cb->nsp_flags |= 0x20;
2049
2050 scp->seg_total += len;
2051
2052 if (((sent + len) == size) && (flags & MSG_EOR)) {
2053 cb->nsp_flags |= 0x40;
2054 scp->seg_total = 0;
2055 if (fctype == NSP_FC_SCMC)
2056 scp->flowrem_dat--;
2057 }
2058 if (fctype == NSP_FC_SRC)
2059 scp->flowrem_dat--;
2060 }
2061
2062 sent += len;
2063 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2064 skb = NULL;
2065
2066 scp->persist = dn_nsp_persist(sk);
2067
2068 }
2069 out:
2070
2071 kfree_skb(skb);
2072
2073 release_sock(sk);
2074
2075 return sent ? sent : err;
2076
2077 out_err:
2078 err = sk_stream_error(sk, flags, err);
2079 release_sock(sk);
2080 return err;
2081 }
2082
2083 static int dn_device_event(struct notifier_block *this, unsigned long event,
2084 void *ptr)
2085 {
2086 struct net_device *dev = (struct net_device *)ptr;
2087
2088 if (!net_eq(dev_net(dev), &init_net))
2089 return NOTIFY_DONE;
2090
2091 switch(event) {
2092 case NETDEV_UP:
2093 dn_dev_up(dev);
2094 break;
2095 case NETDEV_DOWN:
2096 dn_dev_down(dev);
2097 break;
2098 default:
2099 break;
2100 }
2101
2102 return NOTIFY_DONE;
2103 }
2104
2105 static struct notifier_block dn_dev_notifier = {
2106 .notifier_call = dn_device_event,
2107 };
2108
2109 extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2110
2111 static struct packet_type dn_dix_packet_type __read_mostly = {
2112 .type = cpu_to_be16(ETH_P_DNA_RT),
2113 .func = dn_route_rcv,
2114 };
2115
2116 #ifdef CONFIG_PROC_FS
2117 struct dn_iter_state {
2118 int bucket;
2119 };
2120
2121 static struct sock *dn_socket_get_first(struct seq_file *seq)
2122 {
2123 struct dn_iter_state *state = seq->private;
2124 struct sock *n = NULL;
2125
2126 for(state->bucket = 0;
2127 state->bucket < DN_SK_HASH_SIZE;
2128 ++state->bucket) {
2129 n = sk_head(&dn_sk_hash[state->bucket]);
2130 if (n)
2131 break;
2132 }
2133
2134 return n;
2135 }
2136
2137 static struct sock *dn_socket_get_next(struct seq_file *seq,
2138 struct sock *n)
2139 {
2140 struct dn_iter_state *state = seq->private;
2141
2142 n = sk_next(n);
2143 try_again:
2144 if (n)
2145 goto out;
2146 if (++state->bucket >= DN_SK_HASH_SIZE)
2147 goto out;
2148 n = sk_head(&dn_sk_hash[state->bucket]);
2149 goto try_again;
2150 out:
2151 return n;
2152 }
2153
2154 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2155 {
2156 struct sock *sk = dn_socket_get_first(seq);
2157
2158 if (sk) {
2159 while(*pos && (sk = dn_socket_get_next(seq, sk)))
2160 --*pos;
2161 }
2162 return *pos ? NULL : sk;
2163 }
2164
2165 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2166 {
2167 void *rc;
2168 read_lock_bh(&dn_hash_lock);
2169 rc = socket_get_idx(seq, &pos);
2170 if (!rc) {
2171 read_unlock_bh(&dn_hash_lock);
2172 }
2173 return rc;
2174 }
2175
2176 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2177 {
2178 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2179 }
2180
2181 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2182 {
2183 void *rc;
2184
2185 if (v == SEQ_START_TOKEN) {
2186 rc = dn_socket_get_idx(seq, 0);
2187 goto out;
2188 }
2189
2190 rc = dn_socket_get_next(seq, v);
2191 if (rc)
2192 goto out;
2193 read_unlock_bh(&dn_hash_lock);
2194 out:
2195 ++*pos;
2196 return rc;
2197 }
2198
2199 static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2200 {
2201 if (v && v != SEQ_START_TOKEN)
2202 read_unlock_bh(&dn_hash_lock);
2203 }
2204
2205 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2206
2207 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2208 {
2209 int i;
2210
2211 switch (le16_to_cpu(dn->sdn_objnamel)) {
2212 case 0:
2213 sprintf(buf, "%d", dn->sdn_objnum);
2214 break;
2215 default:
2216 for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
2217 buf[i] = dn->sdn_objname[i];
2218 if (IS_NOT_PRINTABLE(buf[i]))
2219 buf[i] = '.';
2220 }
2221 buf[i] = 0;
2222 }
2223 }
2224
2225 static char *dn_state2asc(unsigned char state)
2226 {
2227 switch(state) {
2228 case DN_O:
2229 return "OPEN";
2230 case DN_CR:
2231 return " CR";
2232 case DN_DR:
2233 return " DR";
2234 case DN_DRC:
2235 return " DRC";
2236 case DN_CC:
2237 return " CC";
2238 case DN_CI:
2239 return " CI";
2240 case DN_NR:
2241 return " NR";
2242 case DN_NC:
2243 return " NC";
2244 case DN_CD:
2245 return " CD";
2246 case DN_RJ:
2247 return " RJ";
2248 case DN_RUN:
2249 return " RUN";
2250 case DN_DI:
2251 return " DI";
2252 case DN_DIC:
2253 return " DIC";
2254 case DN_DN:
2255 return " DN";
2256 case DN_CL:
2257 return " CL";
2258 case DN_CN:
2259 return " CN";
2260 }
2261
2262 return "????";
2263 }
2264
2265 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2266 {
2267 struct dn_scp *scp = DN_SK(sk);
2268 char buf1[DN_ASCBUF_LEN];
2269 char buf2[DN_ASCBUF_LEN];
2270 char local_object[DN_MAXOBJL+3];
2271 char remote_object[DN_MAXOBJL+3];
2272
2273 dn_printable_object(&scp->addr, local_object);
2274 dn_printable_object(&scp->peer, remote_object);
2275
2276 seq_printf(seq,
2277 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2278 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2279 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
2280 scp->addrloc,
2281 scp->numdat,
2282 scp->numoth,
2283 scp->ackxmt_dat,
2284 scp->ackxmt_oth,
2285 scp->flowloc_sw,
2286 local_object,
2287 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
2288 scp->addrrem,
2289 scp->numdat_rcv,
2290 scp->numoth_rcv,
2291 scp->ackrcv_dat,
2292 scp->ackrcv_oth,
2293 scp->flowrem_sw,
2294 remote_object,
2295 dn_state2asc(scp->state),
2296 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2297 }
2298
2299 static int dn_socket_seq_show(struct seq_file *seq, void *v)
2300 {
2301 if (v == SEQ_START_TOKEN) {
2302 seq_puts(seq, "Local Remote\n");
2303 } else {
2304 dn_socket_format_entry(seq, v);
2305 }
2306 return 0;
2307 }
2308
2309 static const struct seq_operations dn_socket_seq_ops = {
2310 .start = dn_socket_seq_start,
2311 .next = dn_socket_seq_next,
2312 .stop = dn_socket_seq_stop,
2313 .show = dn_socket_seq_show,
2314 };
2315
2316 static int dn_socket_seq_open(struct inode *inode, struct file *file)
2317 {
2318 return seq_open_private(file, &dn_socket_seq_ops,
2319 sizeof(struct dn_iter_state));
2320 }
2321
2322 static const struct file_operations dn_socket_seq_fops = {
2323 .owner = THIS_MODULE,
2324 .open = dn_socket_seq_open,
2325 .read = seq_read,
2326 .llseek = seq_lseek,
2327 .release = seq_release_private,
2328 };
2329 #endif
2330
2331 static const struct net_proto_family dn_family_ops = {
2332 .family = AF_DECnet,
2333 .create = dn_create,
2334 .owner = THIS_MODULE,
2335 };
2336
2337 static const struct proto_ops dn_proto_ops = {
2338 .family = AF_DECnet,
2339 .owner = THIS_MODULE,
2340 .release = dn_release,
2341 .bind = dn_bind,
2342 .connect = dn_connect,
2343 .socketpair = sock_no_socketpair,
2344 .accept = dn_accept,
2345 .getname = dn_getname,
2346 .poll = dn_poll,
2347 .ioctl = dn_ioctl,
2348 .listen = dn_listen,
2349 .shutdown = dn_shutdown,
2350 .setsockopt = dn_setsockopt,
2351 .getsockopt = dn_getsockopt,
2352 .sendmsg = dn_sendmsg,
2353 .recvmsg = dn_recvmsg,
2354 .mmap = sock_no_mmap,
2355 .sendpage = sock_no_sendpage,
2356 };
2357
2358 void dn_register_sysctl(void);
2359 void dn_unregister_sysctl(void);
2360
2361 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2362 MODULE_AUTHOR("Linux DECnet Project Team");
2363 MODULE_LICENSE("GPL");
2364 MODULE_ALIAS_NETPROTO(PF_DECnet);
2365
2366 static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2367
2368 static int __init decnet_init(void)
2369 {
2370 int rc;
2371
2372 printk(banner);
2373
2374 rc = proto_register(&dn_proto, 1);
2375 if (rc != 0)
2376 goto out;
2377
2378 dn_neigh_init();
2379 dn_dev_init();
2380 dn_route_init();
2381 dn_fib_init();
2382
2383 sock_register(&dn_family_ops);
2384 dev_add_pack(&dn_dix_packet_type);
2385 register_netdevice_notifier(&dn_dev_notifier);
2386
2387 proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops);
2388 dn_register_sysctl();
2389 out:
2390 return rc;
2391
2392 }
2393 module_init(decnet_init);
2394
2395 /*
2396 * Prevent DECnet module unloading until its fixed properly.
2397 * Requires an audit of the code to check for memory leaks and
2398 * initialisation problems etc.
2399 */
2400 #if 0
2401 static void __exit decnet_exit(void)
2402 {
2403 sock_unregister(AF_DECnet);
2404 rtnl_unregister_all(PF_DECnet);
2405 dev_remove_pack(&dn_dix_packet_type);
2406
2407 dn_unregister_sysctl();
2408
2409 unregister_netdevice_notifier(&dn_dev_notifier);
2410
2411 dn_route_cleanup();
2412 dn_dev_cleanup();
2413 dn_neigh_cleanup();
2414 dn_fib_cleanup();
2415
2416 proc_net_remove(&init_net, "decnet");
2417
2418 proto_unregister(&dn_proto);
2419
2420 rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */
2421 }
2422 module_exit(decnet_exit);
2423 #endif