/*
 * net/dccp/proto.c
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

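/*
 * Sketch of tuning the default tx queue length from userspace, assuming
 * this sysctl is exposed by net/dccp/sysctl.c as
 * /proc/sys/net/dccp/default/tx_qlen (the exact path is an assumption;
 * check the sysctl table in your kernel):
 *
 *	# allow up to 20 packets in the transmit queue
 *	echo 20 > /proc/sys/net/dccp/default/tx_qlen
 */
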
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}

void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
		[DCCP_OPEN]		= "OPEN",
		[DCCP_REQUESTING]	= "REQUESTING",
		[DCCP_PARTOPEN]		= "PARTOPEN",
		[DCCP_LISTEN]		= "LISTEN",
		[DCCP_RESPOND]		= "RESPOND",
		[DCCP_CLOSING]		= "CLOSING",
		[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
		[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
		[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
		[DCCP_TIME_WAIT]	= "TIME_WAIT",
		[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else { /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

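/*
 * Userspace sketch of waiting on a DCCP socket with poll(2), matching the
 * mask computed above (`fd' is a hypothetical connected SOCK_DCCP
 * descriptor):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		; // a queued data packet (or a passive close) is readable
 */
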
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

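/*
 * Userspace sketch of SIOCINQ on a DCCP socket: per the handler above, the
 * value is the length of the *next* datagram only, not the total number of
 * queued bytes (`fd' is a hypothetical connected descriptor):
 *
 *	int avail;
 *
 *	if (ioctl(fd, SIOCINQ, &avail) == 0)
 *		; // `avail' bytes will be returned by the next recv()
 */
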
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

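/*
 * Userspace sketch of setting the service code, which must be done before
 * connect() on the client and before listen() on the server (RFC 4340,
 * 8.1.2). The value 42 is arbitrary and shown in network byte order, to
 * match the __be32 the kernel stores (`fd' is hypothetical):
 *
 *	int code = htonl(42);
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &code, sizeof(code));
 */
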
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	/* leave cscov itself intact: the requested value is stored below */
	for (i = 0; i < len; i++)
		list[i] = cscov + i;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}

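/*
 * Userspace sketch of requesting partial checksum coverage (RFC 4340,
 * 9.2): a value n in 1..15 restricts the transmit checksum to the headers
 * plus at most the initial (n - 1) words of payload (`fd' is a
 * hypothetical SOCK_DCCP descriptor; set this before connecting, since it
 * is subject to feature negotiation):
 *
 *	int cscov = 4;
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV, &cscov, sizeof(cscov));
 */
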
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = kmalloc(optlen, GFP_KERNEL);
	if (val == NULL)
		return -ENOMEM;

	if (copy_from_user(val, optval, optlen)) {
		kfree(val);
		return -EFAULT;
	}

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}

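/*
 * Userspace sketch of expressing a CCID preference list before connecting,
 * most-preferred value first (RFC 4340, 6.3.1): prefer CCID-3 (TFRC), fall
 * back to CCID-2 (TCP-like) for both directions (`fd' is hypothetical):
 *
 *	uint8_t ccids[] = { 3, 2 };
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, ccids, sizeof(ccids));
 */
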
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

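/*
 * Userspace sketch of reading negotiated values after connect(), using the
 * options handled above: the current maximum packet size and the TX CCID
 * actually in use (`fd' is hypothetical):
 *
 *	int mps, ccid;
 *	socklen_t olen = sizeof(int);
 *
 *	getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_GET_CUR_MPS, &mps, &olen);
 *	getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_TX_CCID, &ccid, &olen);
 */
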
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (sysctl_dccp_tx_qlen &&
	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	dccp_write_xmit(sk, 0);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

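/*
 * Userspace sketch of the datagram semantics enforced above (`fd' is a
 * hypothetical connected descriptor): a message larger than the current
 * MPS is rejected outright rather than fragmented, and a full TX queue
 * surfaces as EAGAIN even on a blocking socket:
 *
 *	if (send(fd, buf, buflen, 0) < 0) {
 *		if (errno == EMSGSIZE)
 *			;	// shrink buflen below DCCP_SOCKOPT_GET_CUR_MPS
 *		else if (errno == EAGAIN)
 *			;	// transmit queue is full, retry later
 *	}
 */
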
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, 0);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when the user tries to read
				 * from a socket that was never connected.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

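/*
 * Userspace sketch of bringing up a DCCP server through this entry point;
 * the port and service code are arbitrary example values:
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	int code = htonl(42);
 *	struct sockaddr_in sa = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(4242),
 *	};
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &code, sizeof(code));
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	listen(fd, 5);	// ends up in inet_dccp_listen() above
 */
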
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}

void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		dccp_terminate_connection(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
	return snmp_mib_init((void __percpu **)dccp_statistics,
			     sizeof(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
	snmp_mib_free((void __percpu **)dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

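/*
 * Sketch of overriding the automatic hash sizing in dccp_init() below at
 * load time, assuming DCCP is built as the `dccp' module:
 *
 *	modprobe dccp thash_entries=8192
 */
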
#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (totalram_pages >= (128 * 1024))
		goal = totalram_pages >> (21 - PAGE_SHIFT);
	else
		goal = totalram_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
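	/*
	 * Worked example of the sizing above, assuming 4 KiB pages
	 * (PAGE_SHIFT == 12) and 512 MiB of RAM (totalram_pages == 131072):
	 * goal = 131072 >> 9 = 256 pages, so ehash_order becomes 8, i.e. the
	 * table starts out at 2^8 pages = 1 MiB before any fallback
	 * shrinking in the allocation loop below.
	 */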
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
	}

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}

static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");