[NET]: {get|set}sockopt compatibility layer
/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *      This program is free software; you can redistribute it and/or modify it
 *      under the terms of the GNU General Public License version 2 as
 *      published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/semaphore.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
        .lhash_lock  = RW_LOCK_UNLOCKED,
        .lhash_users = ATOMIC_INIT(0),
        .lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);

void dccp_set_state(struct sock *sk, const int state)
{
        const int oldstate = sk->sk_state;

        dccp_pr_debug("%s(%p) %-10.10s -> %s\n",
                      dccp_role(sk), sk,
                      dccp_state_name(oldstate), dccp_state_name(state));
        WARN_ON(state == oldstate);

        switch (state) {
        case DCCP_OPEN:
                if (oldstate != DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
                break;

        case DCCP_CLOSED:
                if (oldstate == DCCP_CLOSING || oldstate == DCCP_OPEN)
                        DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

                sk->sk_prot->unhash(sk);
                if (inet_csk(sk)->icsk_bind_hash != NULL &&
                    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
                        inet_put_port(&dccp_hashinfo, sk);
                /* fall through */
        default:
                if (oldstate == DCCP_OPEN)
                        DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
        }

        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
        sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

void dccp_done(struct sock *sk)
{
        dccp_set_state(sk, DCCP_CLOSED);
        dccp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
        static const char *dccp_packet_names[] = {
                [DCCP_PKT_REQUEST]  = "REQUEST",
                [DCCP_PKT_RESPONSE] = "RESPONSE",
                [DCCP_PKT_DATA]     = "DATA",
                [DCCP_PKT_ACK]      = "ACK",
                [DCCP_PKT_DATAACK]  = "DATAACK",
                [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
                [DCCP_PKT_CLOSE]    = "CLOSE",
                [DCCP_PKT_RESET]    = "RESET",
                [DCCP_PKT_SYNC]     = "SYNC",
                [DCCP_PKT_SYNCACK]  = "SYNCACK",
        };

        if (type >= DCCP_NR_PKT_TYPES)
                return "INVALID";
        else
                return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
        static char *dccp_state_names[] = {
                [DCCP_OPEN]       = "OPEN",
                [DCCP_REQUESTING] = "REQUESTING",
                [DCCP_PARTOPEN]   = "PARTOPEN",
                [DCCP_LISTEN]     = "LISTEN",
                [DCCP_RESPOND]    = "RESPOND",
                [DCCP_CLOSING]    = "CLOSING",
                [DCCP_TIME_WAIT]  = "TIME_WAIT",
                [DCCP_CLOSED]     = "CLOSED",
        };

        if (state >= DCCP_MAX_STATES)
                return "INVALID STATE!";
        else
                return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

void dccp_hash(struct sock *sk)
{
        inet_hash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_hash);

void dccp_unhash(struct sock *sk)
{
        inet_unhash(&dccp_hashinfo, sk);
}

EXPORT_SYMBOL_GPL(dccp_unhash);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        dccp_options_init(&dp->dccps_options);
        do_gettimeofday(&dp->dccps_epoch);

        /*
         * FIXME: We're hardcoding the CCID, and doing this at this point makes
         * the listening (master) sock get CCID control blocks, which is not
         * necessary, but for now, to not mess with the test userspace apps,
         * let's leave it here; later, the real solution is to do this in a
         * setsockopt(CCIDs-I-want/accept). -acme
         */
        if (likely(ctl_sock_initialized)) {
                int rc = dccp_feat_init(sk);

                if (rc)
                        return rc;

                if (dp->dccps_options.dccpo_send_ack_vector) {
                        dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
                        if (dp->dccps_hc_rx_ackvec == NULL)
                                return -ENOMEM;
                }
                dp->dccps_hc_rx_ccid =
                        ccid_hc_rx_new(dp->dccps_options.dccpo_rx_ccid,
                                       sk, GFP_KERNEL);
                dp->dccps_hc_tx_ccid =
                        ccid_hc_tx_new(dp->dccps_options.dccpo_tx_ccid,
                                       sk, GFP_KERNEL);
                if (unlikely(dp->dccps_hc_rx_ccid == NULL ||
                             dp->dccps_hc_tx_ccid == NULL)) {
                        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
                        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
                        if (dp->dccps_options.dccpo_send_ack_vector) {
                                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                                dp->dccps_hc_rx_ackvec = NULL;
                        }
                        dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
                        return -ENOMEM;
                }
        } else {
                /* control socket doesn't need feature negotiation */
                INIT_LIST_HEAD(&dp->dccps_options.dccpo_pending);
                INIT_LIST_HEAD(&dp->dccps_options.dccpo_conf);
        }

        dccp_init_xmit_timers(sk);
        icsk->icsk_rto = DCCP_TIMEOUT_INIT;
        sk->sk_state = DCCP_CLOSED;
        sk->sk_write_space = dccp_write_space;
        icsk->icsk_sync_mss = dccp_sync_mss;
        dp->dccps_mss_cache = 536;
        dp->dccps_role = DCCP_ROLE_UNDEFINED;
        dp->dccps_service = DCCP_SERVICE_INVALID_VALUE;
        dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;

        return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

int dccp_destroy_sock(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        /*
         * DCCP doesn't use sk_write_queue, just sk_send_head
         * for retransmissions
         */
        if (sk->sk_send_head != NULL) {
                kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        /* Clean up a referenced DCCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash != NULL)
                inet_put_port(&dccp_hashinfo, sk);

        kfree(dp->dccps_service_list);
        dp->dccps_service_list = NULL;

        if (dp->dccps_options.dccpo_send_ack_vector) {
                dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
                dp->dccps_hc_rx_ackvec = NULL;
        }
        ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
        ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
        dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

        /* clean up feature negotiation state */
        dccp_feat_clean(sk);

        return 0;
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_role = DCCP_ROLE_LISTEN;
        /*
         * Apps need to use setsockopt(DCCP_SOCKOPT_SERVICE)
         * before calling listen()
         */
        if (dccp_service_not_initialized(sk))
                return -EPROTO;
        return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
}

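#if 0   /* Editor's sketch (userspace, not part of the original file):
         * the setsockopt(DCCP_SOCKOPT_SERVICE)-before-listen() rule that
         * dccp_listen_start() above enforces.  The helper name
         * dccp_listen_fd and the service code 42 are hypothetical;
         * SOCK_DCCP, IPPROTO_DCCP and SOL_DCCP are assumed to come from
         * the Linux headers of this era. */
#include <stdint.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/dccp.h>

static int dccp_listen_fd(unsigned short port)
{
        const uint32_t service = htonl(42);     /* __be32 on the kernel side */
        struct sockaddr_in sin = {
                .sin_family = AF_INET,
                .sin_port   = htons(port),
        };
        int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

        if (fd < 0)
                return -1;
        /* must precede listen(): dccp_listen_start() otherwise fails
         * with -EPROTO on the uninitialized service code */
        if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
                       &service, sizeof(service)) < 0 ||
            bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
            listen(fd, 5) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}
#endif
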
int dccp_disconnect(struct sock *sk, int flags)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;
        const int old_state = sk->sk_state;

        if (old_state != DCCP_CLOSED)
                dccp_set_state(sk, DCCP_CLOSED);

        /* ABORT function of RFC793 */
        if (old_state == DCCP_LISTEN) {
                inet_csk_listen_stop(sk);
                /* FIXME: do the active reset thing */
        } else if (old_state == DCCP_REQUESTING)
                sk->sk_err = ECONNRESET;

        dccp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
        if (sk->sk_send_head != NULL) {
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;
        }

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);

        icsk->icsk_backoff = 0;
        inet_csk_delack_init(sk);
        __sk_dst_reset(sk);

        BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
                       poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;

        poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
           by poll logic and correct handling of state changes
           made by other threads is impossible in any case.
         */

        mask = 0;
        if (sk->sk_err)
                mask = POLLERR;

        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM;

        /* Connected? */
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
                if (atomic_read(&sk->sk_rmem_alloc) > 0)
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
                                        &sk->sk_socket->flags);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }
        }
        return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        dccp_pr_debug("entry\n");
        return -ENOIOCTLCMD;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
                                   char __user *optval, int optlen)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;

        if (service == DCCP_SERVICE_INVALID_VALUE ||
            optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
                return -EINVAL;

        if (optlen > sizeof(service)) {
                sl = kmalloc(optlen, GFP_KERNEL);
                if (sl == NULL)
                        return -ENOMEM;

                sl->dccpsl_nr = optlen / sizeof(u32) - 1;
                if (copy_from_user(sl->dccpsl_list,
                                   optval + sizeof(service),
                                   optlen - sizeof(service)) ||
                    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
                        kfree(sl);
                        return -EFAULT;
                }
        }

        lock_sock(sk);
        dp->dccps_service = service;

        kfree(dp->dccps_service_list);

        dp->dccps_service_list = sl;
        release_sock(sk);
        return 0;
}

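/*
 * Editor's note (sketch, not in the original file): the optval buffer that
 * dccp_setsockopt_service() above expects is one primary __be32 service
 * code, optionally followed by up to DCCP_SERVICE_LIST_MAX_LEN further
 * __be32 codes; everything past the first word is copied into
 * sl->dccpsl_list.  A hypothetical three-entry buffer from userspace:
 *
 *      __be32 svc[3] = { htonl(23), htonl(42), htonl(4242) };
 *      setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, svc, sizeof(svc));
 */
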
/* byte 1 is the feature number; the rest is the preference list */
static int dccp_setsockopt_change(struct sock *sk, int type,
                                  struct dccp_so_feat __user *optval)
{
        struct dccp_so_feat opt;
        u8 *val;
        int rc;

        if (copy_from_user(&opt, optval, sizeof(opt)))
                return -EFAULT;

        val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
        if (!val)
                return -ENOMEM;

        if (copy_from_user(val, opt.dccpsf_val, opt.dccpsf_len)) {
                rc = -EFAULT;
                goto out_free_val;
        }

        rc = dccp_feat_change(sk, type, opt.dccpsf_feat, val, opt.dccpsf_len,
                              GFP_KERNEL);
        if (rc)
                goto out_free_val;

out:
        return rc;

out_free_val:
        kfree(val);
        goto out;
}

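#if 0   /* Editor's sketch (userspace, not part of the original file):
         * expressing a CCID preference list through the CHANGE_L path
         * above.  The helper name and the CCID values are hypothetical;
         * struct dccp_so_feat and DCCPF_CCID (feature number 1) are
         * assumed to come from <linux/dccp.h> of this era. */
#include <sys/socket.h>
#include <linux/dccp.h>

static int dccp_prefer_ccid3(int fd)
{
        unsigned char ccids[] = { 3, 2 };       /* most-preferred first */
        struct dccp_so_feat fval = {
                .dccpsf_feat = DCCPF_CCID,
                .dccpsf_val  = ccids,
                .dccpsf_len  = sizeof(ccids),
        };

        return setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CHANGE_L,
                          &fval, sizeof(fval));
}
#endif
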
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, int optlen)
{
        struct dccp_sock *dp;
        int err;
        int val;

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        if (optname == DCCP_SOCKOPT_SERVICE)
                return dccp_setsockopt_service(sk, val, optval, optlen);

        lock_sock(sk);
        dp = dccp_sk(sk);
        err = 0;

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                dp->dccps_packet_size = val;
                break;

        case DCCP_SOCKOPT_CHANGE_L:
                if (optlen != sizeof(struct dccp_so_feat))
                        err = -EINVAL;
                else
                        err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
                                                     (struct dccp_so_feat __user *)
                                                     optval);
                break;

        case DCCP_SOCKOPT_CHANGE_R:
                if (optlen != sizeof(struct dccp_so_feat))
                        err = -EINVAL;
                else
                        err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
                                                     (struct dccp_so_feat __user *)
                                                     optval);
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

        release_sock(sk);
        return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int optlen)
{
        if (level != SOL_DCCP) {
                if (inet_csk(sk)->icsk_af_ops->compat_setsockopt)
                        return inet_csk(sk)->icsk_af_ops->compat_setsockopt(sk,
                                        level, optname, optval, optlen);
                else
                        return inet_csk(sk)->icsk_af_ops->setsockopt(sk,
                                        level, optname, optval, optlen);
        }
        return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

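#if 0   /* Editor's sketch (not part of the original file): how these
         * hooks are meant to be wired up.  The actual registration lives
         * in the per-address-family files (e.g. net/dccp/ipv4.c), not
         * here; the compat_* field names follow the struct proto
         * additions that this compatibility layer introduces. */
static struct proto dccp_prot_fragment = {
        .setsockopt        = dccp_setsockopt,
        .getsockopt        = dccp_getsockopt,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_dccp_setsockopt,
        .compat_getsockopt = compat_dccp_getsockopt,
#endif
        /* ... remaining fields elided ... */
};
#endif
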
static int dccp_getsockopt_service(struct sock *sk, int len,
                                   __be32 __user *optval,
                                   int __user *optlen)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const struct dccp_service_list *sl;
        int err = -ENOENT, slen = 0, total_len = sizeof(u32);

        lock_sock(sk);
        if (dccp_service_not_initialized(sk))
                goto out;

        if ((sl = dp->dccps_service_list) != NULL) {
                slen = sl->dccpsl_nr * sizeof(u32);
                total_len += slen;
        }

        err = -EINVAL;
        if (total_len > len)
                goto out;

        err = 0;
        if (put_user(total_len, optlen) ||
            put_user(dp->dccps_service, optval) ||
            (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
                err = -EFAULT;
out:
        release_sock(sk);
        return err;
}

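/*
 * Editor's note (sketch, not in the original file): reading the value back.
 * len must cover the primary code plus the whole stored list or -EINVAL is
 * returned, so a caller that does not know the list size passes the maximum:
 *
 *      __be32 buf[DCCP_SERVICE_LIST_MAX_LEN + 1];
 *      socklen_t len = sizeof(buf);
 *      getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, buf, &len);
 *
 * getsockopt() then reports the actual total length back through optlen.
 */
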
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
                              char __user *optval, int __user *optlen)
{
        struct dccp_sock *dp;
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < sizeof(int))
                return -EINVAL;

        dp = dccp_sk(sk);

        switch (optname) {
        case DCCP_SOCKOPT_PACKET_SIZE:
                val = dp->dccps_packet_size;
                len = sizeof(dp->dccps_packet_size);
                break;
        case DCCP_SOCKOPT_SERVICE:
                return dccp_getsockopt_service(sk, len,
                                               (__be32 __user *)optval, optlen);
        case 128 ... 191:
                return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        case 192 ... 255:
                return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
                                             len, (u32 __user *)optval, optlen);
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen) || copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}

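/*
 * Editor's note on the switch above: option names 128..191 are reserved
 * for the receive-side CCID and 192..255 for the transmit-side CCID, so
 * queries in those ranges are forwarded to ccid_hc_rx_getsockopt() and
 * ccid_hc_tx_getsockopt() of whatever CCID the connection negotiated;
 * which options actually exist there depends on the CCID module loaded.
 */
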
int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
                                                             optname, optval,
                                                             optlen);
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        if (level != SOL_DCCP) {
                if (inet_csk(sk)->icsk_af_ops->compat_getsockopt)
                        return inet_csk(sk)->icsk_af_ops->compat_getsockopt(sk,
                                        level, optname, optval, optlen);
                else
                        return inet_csk(sk)->icsk_af_ops->getsockopt(sk,
                                        level, optname, optval, optlen);
        }
        return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        const int flags = msg->msg_flags;
        const int noblock = flags & MSG_DONTWAIT;
        struct sk_buff *skb;
        int rc, size;
        long timeo;

        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, noblock);

        /*
         * We have to use sk_stream_wait_connect here to set sk_write_pending,
         * so that the trick in dccp_rcv_request_sent_state_process works.
         */
        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
                if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_release;

        size = sk->sk_prot->max_header + len;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (skb == NULL)
                goto out_release;

        skb_reserve(skb, sk->sk_prot->max_header);
        rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (rc != 0)
                goto out_discard;

        rc = dccp_write_xmit(sk, skb, &timeo);
        /*
         * XXX we don't use sk_write_queue, so just discard the packet.
         * Current plan however is to _use_ sk_write_queue with
         * an algorithm similar to tcp_sendmsg, where the main difference
         * is that in DCCP we have to respect packet boundaries, so
         * no coalescing of skbs.
         *
         * This bug was _quickly_ found & fixed by just looking at an OSTRA
         * generated callgraph 8) -acme
         */
out_release:
        release_sock(sk);
        return rc ? : len;
out_discard:
        kfree_skb(skb);
        goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

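#if 0   /* Editor's sketch (userspace, not part of the original file):
         * dccp_sendmsg() above maps one send() to exactly one DCCP packet
         * and rejects anything above the cached MSS outright, so callers
         * must size their writes, unlike with TCP.  Hypothetical helper,
         * assuming an already-connected fd. */
#include <errno.h>
#include <sys/socket.h>

static ssize_t dccp_send_record(int fd, const void *buf, size_t len)
{
        ssize_t n = send(fd, buf, len, 0);

        if (n < 0 && errno == EMSGSIZE)
                /* record exceeds dccps_mss_cache: the caller has to
                 * split it; DCCP will not coalesce or fragment for us */
                return -1;
        return n;
}
#endif
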
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t len, int nonblock, int flags, int *addr_len)
{
        const struct dccp_hdr *dh;
        long timeo;

        lock_sock(sk);

        if (sk->sk_state == DCCP_LISTEN) {
                len = -ENOTCONN;
                goto out;
        }

        timeo = sock_rcvtimeo(sk, nonblock);

        do {
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

                if (skb == NULL)
                        goto verify_sock_status;

                dh = dccp_hdr(skb);

                if (dh->dccph_type == DCCP_PKT_DATA ||
                    dh->dccph_type == DCCP_PKT_DATAACK)
                        goto found_ok_skb;

                if (dh->dccph_type == DCCP_PKT_RESET ||
                    dh->dccph_type == DCCP_PKT_CLOSE) {
                        dccp_pr_debug("found fin ok!\n");
                        len = 0;
                        goto found_fin_ok;
                }
                dccp_pr_debug("packet_type=%s\n",
                              dccp_packet_name(dh->dccph_type));
                sk_eat_skb(sk, skb);
verify_sock_status:
                if (sock_flag(sk, SOCK_DONE)) {
                        len = 0;
                        break;
                }

                if (sk->sk_err) {
                        len = sock_error(sk);
                        break;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        len = 0;
                        break;
                }

                if (sk->sk_state == DCCP_CLOSED) {
                        if (!sock_flag(sk, SOCK_DONE)) {
                                /* This occurs when user tries to read
                                 * from never connected socket.
                                 */
                                len = -ENOTCONN;
                                break;
                        }
                        len = 0;
                        break;
                }

                if (!timeo) {
                        len = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        len = sock_intr_errno(timeo);
                        break;
                }

                sk_wait_data(sk, &timeo);
                continue;
found_ok_skb:
                if (len > skb->len)
                        len = skb->len;
                else if (len < skb->len)
                        msg->msg_flags |= MSG_TRUNC;

                if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
                        /* Exception. Bailout! */
                        len = -EFAULT;
                        break;
                }
found_fin_ok:
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
        } while (1);
out:
        release_sock(sk);
        return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

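#if 0   /* Editor's sketch (userspace, not part of the original file):
         * dccp_recvmsg() above truncates a datagram larger than the
         * supplied buffer, sets MSG_TRUNC and eats the skb, so the tail
         * is not readable later.  Hypothetical helper shown. */
#include <sys/socket.h>

static ssize_t dccp_recv_record(int fd, void *buf, size_t len)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t n = recvmsg(fd, &msg, 0);

        if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
                /* datagram was bigger than 'len'; remainder was dropped */
                return -1;
        return n;
}
#endif
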
int inet_dccp_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        unsigned char old_state;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
                goto out;

        old_state = sk->sk_state;
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;

        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != DCCP_LISTEN) {
                /*
                 * FIXME: here it probably should be sk->sk_prot->listen_start
                 * see tcp_listen_start
                 */
                err = dccp_listen_start(sk);
                if (err)
                        goto out;
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;

out:
        release_sock(sk);
        return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

static const unsigned char dccp_new_state[] = {
        /* current state:         new state:                       action: */
        [0]               = DCCP_CLOSED,
        [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_REQUESTING] = DCCP_CLOSED,
        [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
        [DCCP_LISTEN]     = DCCP_CLOSED,
        [DCCP_RESPOND]    = DCCP_CLOSED,
        [DCCP_CLOSING]    = DCCP_CLOSED,
        [DCCP_TIME_WAIT]  = DCCP_CLOSED,
        [DCCP_CLOSED]     = DCCP_CLOSED,
};

static int dccp_close_state(struct sock *sk)
{
        const int next = dccp_new_state[sk->sk_state];
        const int ns = next & DCCP_STATE_MASK;

        if (ns != sk->sk_state)
                dccp_set_state(sk, ns);

        return next & DCCP_ACTION_FIN;
}

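/*
 * Editor's note (worked example of the table above): closing a socket in
 * DCCP_OPEN looks up DCCP_CLOSING | DCCP_ACTION_FIN, so dccp_close_state()
 * moves it to DCCP_CLOSING and returns non-zero, which makes dccp_close()
 * below emit a CLOSE/CLOSEREQ via dccp_send_close().  A socket in
 * DCCP_REQUESTING maps straight to DCCP_CLOSED with no action bit set,
 * so nothing is sent on the wire.
 */
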
void dccp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;

        lock_sock(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == DCCP_LISTEN) {
                dccp_set_state(sk, DCCP_CLOSED);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /*
         * We need to flush the recv. buffs. We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        /* FIXME: check for unread data */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                __kfree_skb(skb);
        }

        if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (dccp_close_state(sk)) {
                dccp_send_close(sk, 1);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        /*
         * It is the last release_sock in its life. It will remove backlog.
         */
        release_sock(sk);
        /*
         * Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        sock_hold(sk);
        sock_orphan(sk);

        /*
         * The last release_sock may have processed the CLOSE or RESET
         * packet moving sock to CLOSED state, if not we have to fire
         * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
         * in draft-ietf-dccp-spec-11. -acme
         */
        if (sk->sk_state == DCCP_CLOSING) {
                /* FIXME: should start at 2 * RTT */
                /* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
#if 0
                /* Yeah, we should use sk->sk_prot->orphan_count, etc */
                dccp_set_state(sk, DCCP_CLOSED);
#endif
        }

        atomic_inc(sk->sk_prot->orphan_count);
        if (sk->sk_state == DCCP_CLOSED)
                inet_csk_destroy_sock(sk);

        /* Otherwise, socket is reprieved until protocol close. */

        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
        dccp_pr_debug("entry\n");
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static int __init dccp_mib_init(void)
{
        int rc = -ENOMEM;

        dccp_statistics[0] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[0] == NULL)
                goto out;

        dccp_statistics[1] = alloc_percpu(struct dccp_mib);
        if (dccp_statistics[1] == NULL)
                goto out_free_one;

        rc = 0;
out:
        return rc;
out_free_one:
        free_percpu(dccp_statistics[0]);
        dccp_statistics[0] = NULL;
        goto out;
}

static void dccp_mib_exit(void)
{
        free_percpu(dccp_statistics[0]);
        free_percpu(dccp_statistics[1]);
        dccp_statistics[0] = dccp_statistics[1] = NULL;
}

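/*
 * Editor's note: per the SNMP helpers of this era, dccp_statistics[0] is
 * the per-CPU half updated from softirq context (SNMP_INC_STATS_BH) and
 * dccp_statistics[1] the half updated from process context; the plain
 * DCCP_INC_STATS() picks one based on in_softirq().  That is why
 * dccp_mib_init() above has to allocate, and dccp_mib_exit() free, both.
 */
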
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, int, 0444);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
        unsigned long goal;
        int ehash_order, bhash_order, i;
        int rc = -ENOBUFS;

        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
                goto out;

        /*
         * Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        if (num_physpages >= (128 * 1024))
                goal = num_physpages >> (21 - PAGE_SHIFT);
        else
                goal = num_physpages >> (23 - PAGE_SHIFT);

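        /*
         * Editor's note (worked example of the sizing above): with 4 KiB
         * pages (PAGE_SHIFT = 12) and 512 MiB of RAM, num_physpages is
         * 131072 >= 128 * 1024, so goal = 131072 >> (21 - 12) = 256 pages,
         * i.e. 1 MiB set aside for the established hash before rounding
         * up to a power-of-two page order below.
         */
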
        if (thash_entries)
                goal = (thash_entries *
                        sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
        for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
                ;
        do {
                dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
                                           sizeof(struct inet_ehash_bucket);
                dccp_hashinfo.ehash_size >>= 1;
                while (dccp_hashinfo.ehash_size &
                       (dccp_hashinfo.ehash_size - 1))
                        dccp_hashinfo.ehash_size--;
                dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
                        __get_free_pages(GFP_ATOMIC, ehash_order);
        } while (!dccp_hashinfo.ehash && --ehash_order > 0);

        if (!dccp_hashinfo.ehash) {
                printk(KERN_CRIT "Failed to allocate DCCP "
                                 "established hash table\n");
                goto out_free_bind_bucket_cachep;
        }

        for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
                rwlock_init(&dccp_hashinfo.ehash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
        }

        bhash_order = ehash_order;

        do {
                dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
                                           sizeof(struct inet_bind_hashbucket);
                if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
                    bhash_order > 0)
                        continue;
                dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
                        __get_free_pages(GFP_ATOMIC, bhash_order);
        } while (!dccp_hashinfo.bhash && --bhash_order >= 0);

        if (!dccp_hashinfo.bhash) {
                printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
                goto out_free_dccp_ehash;
        }

        for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
                spin_lock_init(&dccp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
        }

        rc = dccp_mib_init();
        if (rc)
                goto out_free_dccp_bhash;

        rc = dccp_ackvec_init();
        if (rc)
                goto out_free_dccp_mib;

        rc = dccp_sysctl_init();
        if (rc)
                goto out_ackvec_exit;
out:
        return rc;
out_ackvec_exit:
        dccp_ackvec_exit();
out_free_dccp_mib:
        dccp_mib_exit();
out_free_dccp_bhash:
        free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
        dccp_hashinfo.bhash = NULL;
out_free_dccp_ehash:
        free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
        dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_hashinfo.bind_bucket_cachep = NULL;
        goto out;
}

static void __exit dccp_fini(void)
{
        dccp_mib_exit();
        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *
                             sizeof(struct inet_bind_hashbucket)));
        free_pages((unsigned long)dccp_hashinfo.ehash,
                   get_order(dccp_hashinfo.ehash_size *
                             sizeof(struct inet_ehash_bucket)));
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_ackvec_exit();
        dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");