1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001-2003 Intel Corp.
6 * Copyright (c) 2001-2002 Nokia, Inc.
7 * Copyright (c) 2001 La Monte H.P. Yarroll
8 *
9 * This file is part of the SCTP kernel implementation
10 *
11 * These functions interface with the sockets layer to implement the
12 * SCTP Extensions for the Sockets API.
13 *
14 * Note that the descriptions from the specification are USER level
15 * functions--this file is the functions which populate the struct proto
16 * for SCTP which is the BOTTOM of the sockets interface.
17 *
18 * This SCTP implementation is free software;
19 * you can redistribute it and/or modify it under the terms of
20 * the GNU General Public License as published by
21 * the Free Software Foundation; either version 2, or (at your option)
22 * any later version.
23 *
24 * This SCTP implementation is distributed in the hope that it
25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
26 * ************************
27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
28 * See the GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with GNU CC; see the file COPYING. If not, see
32 * <http://www.gnu.org/licenses/>.
33 *
34 * Please send any bug reports or fixes you make to the
35 * email address(es):
36 * lksctp developers <linux-sctp@vger.kernel.org>
37 *
38 * Written or modified by:
39 * La Monte H.P. Yarroll <piggy@acm.org>
40 * Narasimha Budihal <narsi@refcode.org>
41 * Karl Knutson <karl@athena.chicago.il.us>
42 * Jon Grimm <jgrimm@us.ibm.com>
43 * Xingang Guo <xingang.guo@intel.com>
44 * Daisy Chang <daisyc@us.ibm.com>
45 * Sridhar Samudrala <samudrala@us.ibm.com>
46 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
47 * Ardelle Fan <ardelle.fan@intel.com>
48 * Ryan Layer <rmlayer@us.ibm.com>
49 * Anup Pemmaiah <pemmaiah@cc.usu.edu>
50 * Kevin Gao <kevin.gao@intel.com>
51 */
52
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
55 #include <crypto/hash.h>
56 #include <linux/types.h>
57 #include <linux/kernel.h>
58 #include <linux/wait.h>
59 #include <linux/time.h>
60 #include <linux/sched/signal.h>
61 #include <linux/ip.h>
62 #include <linux/capability.h>
63 #include <linux/fcntl.h>
64 #include <linux/poll.h>
65 #include <linux/init.h>
66 #include <linux/slab.h>
67 #include <linux/file.h>
68 #include <linux/compat.h>
69
70 #include <net/ip.h>
71 #include <net/icmp.h>
72 #include <net/route.h>
73 #include <net/ipv6.h>
74 #include <net/inet_common.h>
75 #include <net/busy_poll.h>
76
77 #include <linux/socket.h> /* for sa_family_t */
78 #include <linux/export.h>
79 #include <net/sock.h>
80 #include <net/sctp/sctp.h>
81 #include <net/sctp/sm.h>
82
83 /* Forward declarations for internal helper functions. */
84 static int sctp_writeable(struct sock *sk);
85 static void sctp_wfree(struct sk_buff *skb);
86 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
87 size_t msg_len);
88 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
89 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
90 static int sctp_wait_for_accept(struct sock *sk, long timeo);
91 static void sctp_wait_for_close(struct sock *sk, long timeo);
92 static void sctp_destruct_sock(struct sock *sk);
93 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
94 union sctp_addr *addr, int len);
95 static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
96 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
97 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
98 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
99 static int sctp_send_asconf(struct sctp_association *asoc,
100 struct sctp_chunk *chunk);
101 static int sctp_do_bind(struct sock *, union sctp_addr *, int);
102 static int sctp_autobind(struct sock *sk);
103 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
104 struct sctp_association *assoc,
105 enum sctp_socket_type type);
106
107 static unsigned long sctp_memory_pressure;
108 static atomic_long_t sctp_memory_allocated;
109 struct percpu_counter sctp_sockets_allocated;
110
111 static void sctp_enter_memory_pressure(struct sock *sk)
112 {
113 sctp_memory_pressure = 1;
114 }
115
116
117 /* Get the sndbuf space currently available on the association. */
118 static inline int sctp_wspace(struct sctp_association *asoc)
119 {
120 int amt;
121
122 if (asoc->ep->sndbuf_policy)
123 amt = asoc->sndbuf_used;
124 else
125 amt = sk_wmem_alloc_get(asoc->base.sk);
126
127 if (amt >= asoc->base.sk->sk_sndbuf) {
128 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
129 amt = 0;
130 else {
131 amt = sk_stream_wspace(asoc->base.sk);
132 if (amt < 0)
133 amt = 0;
134 }
135 } else {
136 amt = asoc->base.sk->sk_sndbuf - amt;
137 }
138 return amt;
139 }
140
141 /* Increment the used sndbuf space count of the corresponding association by
142 * the size of the outgoing data chunk.
143 * Also, set the skb destructor for sndbuf accounting later.
144 *
145 * Since it is always 1-1 between chunk and skb, and also a new skb is always
146 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
147 * destructor in the data chunk skb for the purpose of the sndbuf space
148 * tracking.
149 */
150 static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
151 {
152 struct sctp_association *asoc = chunk->asoc;
153 struct sock *sk = asoc->base.sk;
154
155 /* The sndbuf space is tracked per association. */
156 sctp_association_hold(asoc);
157
158 skb_set_owner_w(chunk->skb, sk);
159
160 chunk->skb->destructor = sctp_wfree;
161 /* Save the chunk pointer in skb for sctp_wfree to use later. */
162 skb_shinfo(chunk->skb)->destructor_arg = chunk;
163
164 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
165 sizeof(struct sk_buff) +
166 sizeof(struct sctp_chunk);
167
168 refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
169 sk->sk_wmem_queued += chunk->skb->truesize;
170 sk_mem_charge(sk, chunk->skb->truesize);
171 }
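
/* Worked example of the accounting above (a sketch; the struct sizes
 * depend on the kernel configuration): queueing a 1000-byte DATA chunk
 * on an association whose endpoint has sndbuf_policy set increases
 * asoc->sndbuf_used by
 *
 *     1000 + sizeof(struct sk_buff) + sizeof(struct sctp_chunk)
 *
 * and sctp_wspace() above shrinks by the same amount until sctp_wfree()
 * runs when the chunk's skb is finally freed.
 */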
172
173 static void sctp_clear_owner_w(struct sctp_chunk *chunk)
174 {
175 skb_orphan(chunk->skb);
176 }
177
178 static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
179 void (*cb)(struct sctp_chunk *))
180
181 {
182 struct sctp_outq *q = &asoc->outqueue;
183 struct sctp_transport *t;
184 struct sctp_chunk *chunk;
185
186 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
187 list_for_each_entry(chunk, &t->transmitted, transmitted_list)
188 cb(chunk);
189
190 list_for_each_entry(chunk, &q->retransmit, transmitted_list)
191 cb(chunk);
192
193 list_for_each_entry(chunk, &q->sacked, transmitted_list)
194 cb(chunk);
195
196 list_for_each_entry(chunk, &q->abandoned, transmitted_list)
197 cb(chunk);
198
199 list_for_each_entry(chunk, &q->out_chunk_list, list)
200 cb(chunk);
201 }
202
203 /* Verify that this is a valid address. */
204 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
205 int len)
206 {
207 struct sctp_af *af;
208
209 /* Verify basic sockaddr. */
210 af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
211 if (!af)
212 return -EINVAL;
213
214 /* Is this a valid SCTP address? */
215 if (!af->addr_valid(addr, sctp_sk(sk), NULL))
216 return -EINVAL;
217
218 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
219 return -EINVAL;
220
221 return 0;
222 }
223
224 /* Look up the association by its id. If this is not a UDP-style
225 * socket, the ID field is always ignored.
226 */
227 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
228 {
229 struct sctp_association *asoc = NULL;
230
231 /* If this is not a UDP-style socket, assoc id should be ignored. */
232 if (!sctp_style(sk, UDP)) {
233 /* Return NULL if the socket state is not ESTABLISHED. It
234 * could be a TCP-style listening socket or a socket which
235 * hasn't yet called connect() to establish an association.
236 */
237 if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
238 return NULL;
239
240 /* Get the first and the only association from the list. */
241 if (!list_empty(&sctp_sk(sk)->ep->asocs))
242 asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
243 struct sctp_association, asocs);
244 return asoc;
245 }
246
247 /* Otherwise this is a UDP-style socket. */
248 if (!id || (id == (sctp_assoc_t)-1))
249 return NULL;
250
251 spin_lock_bh(&sctp_assocs_id_lock);
252 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
253 spin_unlock_bh(&sctp_assocs_id_lock);
254
255 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
256 return NULL;
257
258 return asoc;
259 }
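
/* Illustrative user-space sketch (an assumption of typical usage, based
 * on the lksctp-tools <netinet/sctp.h> API): on a one-to-many socket the
 * association id carried in a socket option is resolved through
 * sctp_id2assoc() above; on a one-to-one socket the id is ignored.
 * assoc_id would come from, e.g., an SCTP_ASSOC_CHANGE notification.
 *
 *     struct sctp_status status = { .sstat_assoc_id = assoc_id };
 *     socklen_t len = sizeof(status);
 *
 *     if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
 *             printf("association state: %d\n", status.sstat_state);
 */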
260
261 /* Look up the transport from an address and an assoc id. If both address and
262 * id are specified, the associations matching the address and the id should be
263 * the same.
264 */
265 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
266 struct sockaddr_storage *addr,
267 sctp_assoc_t id)
268 {
269 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
270 struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
271 union sctp_addr *laddr = (union sctp_addr *)addr;
272 struct sctp_transport *transport;
273
274 if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
275 return NULL;
276
277 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
278 laddr,
279 &transport);
280
281 if (!addr_asoc)
282 return NULL;
283
284 id_asoc = sctp_id2assoc(sk, id);
285 if (id_asoc && (id_asoc != addr_asoc))
286 return NULL;
287
288 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
289 (union sctp_addr *)addr);
290
291 return transport;
292 }
293
294 /* API 3.1.2 bind() - UDP Style Syntax
295 * The syntax of bind() is,
296 *
297 * ret = bind(int sd, struct sockaddr *addr, int addrlen);
298 *
299 * sd - the socket descriptor returned by socket().
300 * addr - the address structure (struct sockaddr_in or struct
301 * sockaddr_in6 [RFC 2553]),
302 * addr_len - the size of the address structure.
303 */
304 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
305 {
306 int retval = 0;
307
308 lock_sock(sk);
309
310 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
311 addr, addr_len);
312
313 /* Disallow binding twice. */
314 if (!sctp_sk(sk)->ep->base.bind_addr.port)
315 retval = sctp_do_bind(sk, (union sctp_addr *)addr,
316 addr_len);
317 else
318 retval = -EINVAL;
319
320 release_sock(sk);
321
322 return retval;
323 }
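
/* Illustrative user-space sketch of the bind() usage described above
 * (assumes the usual socket headers plus IPPROTO_SCTP support; error
 * handling trimmed):
 *
 *     int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *     struct sockaddr_in laddr = {
 *             .sin_family      = AF_INET,
 *             .sin_port        = htons(5000),
 *             .sin_addr.s_addr = htonl(INADDR_ANY),
 *     };
 *
 *     if (bind(sd, (struct sockaddr *)&laddr, sizeof(laddr)) < 0)
 *             perror("bind");
 *
 * A second bind() on the same socket fails, matching the "disallow
 * binding twice" check in sctp_bind() above.
 */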
324
325 static long sctp_get_port_local(struct sock *, union sctp_addr *);
326
327 /* Verify this is a valid sockaddr. */
328 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
329 union sctp_addr *addr, int len)
330 {
331 struct sctp_af *af;
332
333 /* Check minimum size. */
334 if (len < sizeof (struct sockaddr))
335 return NULL;
336
337 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
338 return NULL;
339
340 if (addr->sa.sa_family == AF_INET6) {
341 if (len < SIN6_LEN_RFC2133)
342 return NULL;
343 /* V4 mapped addresses are really of AF_INET family */
344 if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
345 !opt->pf->af_supported(AF_INET, opt))
346 return NULL;
347 }
348
349 /* If we get this far, af is valid. */
350 af = sctp_get_af_specific(addr->sa.sa_family);
351
352 if (len < af->sockaddr_len)
353 return NULL;
354
355 return af;
356 }
357
358 /* Bind a local address either to an endpoint or to an association. */
359 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
360 {
361 struct net *net = sock_net(sk);
362 struct sctp_sock *sp = sctp_sk(sk);
363 struct sctp_endpoint *ep = sp->ep;
364 struct sctp_bind_addr *bp = &ep->base.bind_addr;
365 struct sctp_af *af;
366 unsigned short snum;
367 int ret = 0;
368
369 /* Common sockaddr verification. */
370 af = sctp_sockaddr_af(sp, addr, len);
371 if (!af) {
372 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
373 __func__, sk, addr, len);
374 return -EINVAL;
375 }
376
377 snum = ntohs(addr->v4.sin_port);
378
379 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
380 __func__, sk, &addr->sa, bp->port, snum, len);
381
382 /* PF specific bind() address verification. */
383 if (!sp->pf->bind_verify(sp, addr))
384 return -EADDRNOTAVAIL;
385
386 /* We must either be unbound, or bind to the same port.
387 * It's OK to allow 0 ports if we are already bound.
388 * We'll just inherit an already bound port in this case.
389 */
390 if (bp->port) {
391 if (!snum)
392 snum = bp->port;
393 else if (snum != bp->port) {
394 pr_debug("%s: new port %d doesn't match existing port "
395 "%d\n", __func__, snum, bp->port);
396 return -EINVAL;
397 }
398 }
399
400 if (snum && snum < inet_prot_sock(net) &&
401 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
402 return -EACCES;
403
404 /* See if the address matches any of the addresses we may have
405 * already bound before checking against other endpoints.
406 */
407 if (sctp_bind_addr_match(bp, addr, sp))
408 return -EINVAL;
409
410 /* Make sure we are allowed to bind here.
411 * The function sctp_get_port_local() does duplicate address
412 * detection.
413 */
414 addr->v4.sin_port = htons(snum);
415 if ((ret = sctp_get_port_local(sk, addr))) {
416 return -EADDRINUSE;
417 }
418
419 /* Refresh ephemeral port. */
420 if (!bp->port)
421 bp->port = inet_sk(sk)->inet_num;
422
423 /* Add the address to the bind address list.
424 * Use GFP_ATOMIC since BHs will be disabled.
425 */
426 ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
427 SCTP_ADDR_SRC, GFP_ATOMIC);
428
429 /* Copy back into socket for getsockname() use. */
430 if (!ret) {
431 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
432 sp->pf->to_sk_saddr(addr, sk);
433 }
434
435 return ret;
436 }
437
438 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
439 *
440 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
441 * at any one time. If a sender, after sending an ASCONF chunk, decides
442 * it needs to transfer another ASCONF Chunk, it MUST wait until the
443 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
444 * subsequent ASCONF. Note this restriction binds each side, so at any
445 * time two ASCONF may be in-transit on any given association (one sent
446 * from each endpoint).
447 */
448 static int sctp_send_asconf(struct sctp_association *asoc,
449 struct sctp_chunk *chunk)
450 {
451 struct net *net = sock_net(asoc->base.sk);
452 int retval = 0;
453
454 /* If there is an outstanding ASCONF chunk, queue it for later
455 * transmission.
456 */
457 if (asoc->addip_last_asconf) {
458 list_add_tail(&chunk->list, &asoc->addip_chunk_list);
459 goto out;
460 }
461
462 /* Hold the chunk until an ASCONF_ACK is received. */
463 sctp_chunk_hold(chunk);
464 retval = sctp_primitive_ASCONF(net, asoc, chunk);
465 if (retval)
466 sctp_chunk_free(chunk);
467 else
468 asoc->addip_last_asconf = chunk;
469
470 out:
471 return retval;
472 }
473
474 /* Add a list of addresses as bind addresses to local endpoint or
475 * association.
476 *
477 * Basically run through each address specified in the addrs/addrcnt
478 * array/length pair, determine if it is IPv6 or IPv4 and call
479 * sctp_do_bind() on it.
480 *
481 * If any of them fails, then the operation will be reversed and the
482 * ones that were added will be removed.
483 *
484 * Only sctp_setsockopt_bindx() is supposed to call this function.
485 */
486 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
487 {
488 int cnt;
489 int retval = 0;
490 void *addr_buf;
491 struct sockaddr *sa_addr;
492 struct sctp_af *af;
493
494 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
495 addrs, addrcnt);
496
497 addr_buf = addrs;
498 for (cnt = 0; cnt < addrcnt; cnt++) {
499 /* The list may contain either IPv4 or IPv6 address;
500 * determine the address length for walking thru the list.
501 */
502 sa_addr = addr_buf;
503 af = sctp_get_af_specific(sa_addr->sa_family);
504 if (!af) {
505 retval = -EINVAL;
506 goto err_bindx_add;
507 }
508
509 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
510 af->sockaddr_len);
511
512 addr_buf += af->sockaddr_len;
513
514 err_bindx_add:
515 if (retval < 0) {
516 /* Failed. Cleanup the ones that have been added */
517 if (cnt > 0)
518 sctp_bindx_rem(sk, addrs, cnt);
519 return retval;
520 }
521 }
522
523 return retval;
524 }
525
526 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the
527 * associations that are part of the endpoint indicating that a list of local
528 * addresses are added to the endpoint.
529 *
530 * If any of the addresses is already in the bind address list of the
531 * association, we do not send the chunk for that association. But it will not
532 * affect other associations.
533 *
534 * Only sctp_setsockopt_bindx() is supposed to call this function.
535 */
536 static int sctp_send_asconf_add_ip(struct sock *sk,
537 struct sockaddr *addrs,
538 int addrcnt)
539 {
540 struct net *net = sock_net(sk);
541 struct sctp_sock *sp;
542 struct sctp_endpoint *ep;
543 struct sctp_association *asoc;
544 struct sctp_bind_addr *bp;
545 struct sctp_chunk *chunk;
546 struct sctp_sockaddr_entry *laddr;
547 union sctp_addr *addr;
548 union sctp_addr saveaddr;
549 void *addr_buf;
550 struct sctp_af *af;
551 struct list_head *p;
552 int i;
553 int retval = 0;
554
555 if (!net->sctp.addip_enable)
556 return retval;
557
558 sp = sctp_sk(sk);
559 ep = sp->ep;
560
561 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
562 __func__, sk, addrs, addrcnt);
563
564 list_for_each_entry(asoc, &ep->asocs, asocs) {
565 if (!asoc->peer.asconf_capable)
566 continue;
567
568 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
569 continue;
570
571 if (!sctp_state(asoc, ESTABLISHED))
572 continue;
573
574 /* Check if any address in the packed array of addresses is
575 * in the bind address list of the association. If so,
576 * do not send the asconf chunk to its peer, but continue with
577 * other associations.
578 */
579 addr_buf = addrs;
580 for (i = 0; i < addrcnt; i++) {
581 addr = addr_buf;
582 af = sctp_get_af_specific(addr->v4.sin_family);
583 if (!af) {
584 retval = -EINVAL;
585 goto out;
586 }
587
588 if (sctp_assoc_lookup_laddr(asoc, addr))
589 break;
590
591 addr_buf += af->sockaddr_len;
592 }
593 if (i < addrcnt)
594 continue;
595
596 /* Use the first valid address in bind addr list of
597 * association as Address Parameter of ASCONF CHUNK.
598 */
599 bp = &asoc->base.bind_addr;
600 p = bp->address_list.next;
601 laddr = list_entry(p, struct sctp_sockaddr_entry, list);
602 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
603 addrcnt, SCTP_PARAM_ADD_IP);
604 if (!chunk) {
605 retval = -ENOMEM;
606 goto out;
607 }
608
609 /* Add the new addresses to the bind address list with
610 * use_as_src set to 0.
611 */
612 addr_buf = addrs;
613 for (i = 0; i < addrcnt; i++) {
614 addr = addr_buf;
615 af = sctp_get_af_specific(addr->v4.sin_family);
616 memcpy(&saveaddr, addr, af->sockaddr_len);
617 retval = sctp_add_bind_addr(bp, &saveaddr,
618 sizeof(saveaddr),
619 SCTP_ADDR_NEW, GFP_ATOMIC);
620 addr_buf += af->sockaddr_len;
621 }
622 if (asoc->src_out_of_asoc_ok) {
623 struct sctp_transport *trans;
624
625 list_for_each_entry(trans,
626 &asoc->peer.transport_addr_list, transports) {
627 /* Clear the source and route cache */
628 sctp_transport_dst_release(trans);
629 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
630 2*asoc->pathmtu, 4380));
631 trans->ssthresh = asoc->peer.i.a_rwnd;
632 trans->rto = asoc->rto_initial;
633 sctp_max_rto(asoc, trans);
634 trans->rtt = trans->srtt = trans->rttvar = 0;
635 sctp_transport_route(trans, NULL,
636 sctp_sk(asoc->base.sk));
637 }
638 }
639 retval = sctp_send_asconf(asoc, chunk);
640 }
641
642 out:
643 return retval;
644 }
645
646 /* Remove a list of addresses from bind addresses list. Do not remove the
647 * last address.
648 *
649 * Basically run through each address specified in the addrs/addrcnt
650 * array/length pair, determine if it is IPv6 or IPv4 and call
651 * sctp_del_bind() on it.
652 *
653 * If any of them fails, then the operation will be reversed and the
654 * ones that were removed will be added back.
655 *
656 * At least one address has to be left; if only one address is
657 * available, the operation will return -EBUSY.
658 *
659 * Only sctp_setsockopt_bindx() is supposed to call this function.
660 */
661 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
662 {
663 struct sctp_sock *sp = sctp_sk(sk);
664 struct sctp_endpoint *ep = sp->ep;
665 int cnt;
666 struct sctp_bind_addr *bp = &ep->base.bind_addr;
667 int retval = 0;
668 void *addr_buf;
669 union sctp_addr *sa_addr;
670 struct sctp_af *af;
671
672 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
673 __func__, sk, addrs, addrcnt);
674
675 addr_buf = addrs;
676 for (cnt = 0; cnt < addrcnt; cnt++) {
677 /* If the bind address list is empty or if there is only one
678 * bind address, there is nothing more to be removed (we need
679 * at least one address here).
680 */
681 if (list_empty(&bp->address_list) ||
682 (sctp_list_single_entry(&bp->address_list))) {
683 retval = -EBUSY;
684 goto err_bindx_rem;
685 }
686
687 sa_addr = addr_buf;
688 af = sctp_get_af_specific(sa_addr->sa.sa_family);
689 if (!af) {
690 retval = -EINVAL;
691 goto err_bindx_rem;
692 }
693
694 if (!af->addr_valid(sa_addr, sp, NULL)) {
695 retval = -EADDRNOTAVAIL;
696 goto err_bindx_rem;
697 }
698
699 if (sa_addr->v4.sin_port &&
700 sa_addr->v4.sin_port != htons(bp->port)) {
701 retval = -EINVAL;
702 goto err_bindx_rem;
703 }
704
705 if (!sa_addr->v4.sin_port)
706 sa_addr->v4.sin_port = htons(bp->port);
707
708 /* FIXME - There is probably a need to check if sk->sk_saddr and
709 * sk->sk_rcv_addr are currently set to one of the addresses to
710 * be removed. This is something which needs to be looked into
711 * when we are fixing the outstanding issues with multi-homing
712 * socket routing and failover schemes. Refer to comments in
713 * sctp_do_bind(). -daisy
714 */
715 retval = sctp_del_bind_addr(bp, sa_addr);
716
717 addr_buf += af->sockaddr_len;
718 err_bindx_rem:
719 if (retval < 0) {
720 /* Failed. Add the ones that have been removed back */
721 if (cnt > 0)
722 sctp_bindx_add(sk, addrs, cnt);
723 return retval;
724 }
725 }
726
727 return retval;
728 }
729
730 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of
731 * the associations that are part of the endpoint indicating that a list of
732 * local addresses are removed from the endpoint.
733 *
734 * If any of the addresses is already in the bind address list of the
735 * association, we do not send the chunk for that association. But it will not
736 * affect other associations.
737 *
738 * Only sctp_setsockopt_bindx() is supposed to call this function.
739 */
740 static int sctp_send_asconf_del_ip(struct sock *sk,
741 struct sockaddr *addrs,
742 int addrcnt)
743 {
744 struct net *net = sock_net(sk);
745 struct sctp_sock *sp;
746 struct sctp_endpoint *ep;
747 struct sctp_association *asoc;
748 struct sctp_transport *transport;
749 struct sctp_bind_addr *bp;
750 struct sctp_chunk *chunk;
751 union sctp_addr *laddr;
752 void *addr_buf;
753 struct sctp_af *af;
754 struct sctp_sockaddr_entry *saddr;
755 int i;
756 int retval = 0;
757 int stored = 0;
758
759 chunk = NULL;
760 if (!net->sctp.addip_enable)
761 return retval;
762
763 sp = sctp_sk(sk);
764 ep = sp->ep;
765
766 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
767 __func__, sk, addrs, addrcnt);
768
769 list_for_each_entry(asoc, &ep->asocs, asocs) {
770
771 if (!asoc->peer.asconf_capable)
772 continue;
773
774 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
775 continue;
776
777 if (!sctp_state(asoc, ESTABLISHED))
778 continue;
779
780 /* Check if any address in the packed array of addresses is
781 * not present in the bind address list of the association.
782 * If so, do not send the asconf chunk to its peer, but
783 * continue with other associations.
784 */
785 addr_buf = addrs;
786 for (i = 0; i < addrcnt; i++) {
787 laddr = addr_buf;
788 af = sctp_get_af_specific(laddr->v4.sin_family);
789 if (!af) {
790 retval = -EINVAL;
791 goto out;
792 }
793
794 if (!sctp_assoc_lookup_laddr(asoc, laddr))
795 break;
796
797 addr_buf += af->sockaddr_len;
798 }
799 if (i < addrcnt)
800 continue;
801
802 /* Find one address in the association's bind address list
803 * that is not in the packed array of addresses. This is to
804 * make sure that we do not delete all the addresses in the
805 * association.
806 */
807 bp = &asoc->base.bind_addr;
808 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
809 addrcnt, sp);
810 if ((laddr == NULL) && (addrcnt == 1)) {
811 if (asoc->asconf_addr_del_pending)
812 continue;
813 asoc->asconf_addr_del_pending =
814 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
815 if (asoc->asconf_addr_del_pending == NULL) {
816 retval = -ENOMEM;
817 goto out;
818 }
819 asoc->asconf_addr_del_pending->sa.sa_family =
820 addrs->sa_family;
821 asoc->asconf_addr_del_pending->v4.sin_port =
822 htons(bp->port);
823 if (addrs->sa_family == AF_INET) {
824 struct sockaddr_in *sin;
825
826 sin = (struct sockaddr_in *)addrs;
827 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
828 } else if (addrs->sa_family == AF_INET6) {
829 struct sockaddr_in6 *sin6;
830
831 sin6 = (struct sockaddr_in6 *)addrs;
832 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
833 }
834
835 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
836 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
837 asoc->asconf_addr_del_pending);
838
839 asoc->src_out_of_asoc_ok = 1;
840 stored = 1;
841 goto skip_mkasconf;
842 }
843
844 if (laddr == NULL)
845 return -EINVAL;
846
847 /* We do not need RCU protection throughout this loop
848 * because this is done under a socket lock from the
849 * setsockopt call.
850 */
851 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
852 SCTP_PARAM_DEL_IP);
853 if (!chunk) {
854 retval = -ENOMEM;
855 goto out;
856 }
857
858 skip_mkasconf:
859 /* Reset use_as_src flag for the addresses in the bind address
860 * list that are to be deleted.
861 */
862 addr_buf = addrs;
863 for (i = 0; i < addrcnt; i++) {
864 laddr = addr_buf;
865 af = sctp_get_af_specific(laddr->v4.sin_family);
866 list_for_each_entry(saddr, &bp->address_list, list) {
867 if (sctp_cmp_addr_exact(&saddr->a, laddr))
868 saddr->state = SCTP_ADDR_DEL;
869 }
870 addr_buf += af->sockaddr_len;
871 }
872
873 /* Update the route and saddr entries for all the transports
874 * as some of the addresses in the bind address list are
875 * about to be deleted and cannot be used as source addresses.
876 */
877 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
878 transports) {
879 sctp_transport_dst_release(transport);
880 sctp_transport_route(transport, NULL,
881 sctp_sk(asoc->base.sk));
882 }
883
884 if (stored)
885 /* We don't need to transmit ASCONF */
886 continue;
887 retval = sctp_send_asconf(asoc, chunk);
888 }
889 out:
890 return retval;
891 }
892
893 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
894 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
895 {
896 struct sock *sk = sctp_opt2sk(sp);
897 union sctp_addr *addr;
898 struct sctp_af *af;
899
900 /* It is safe to write port space in caller. */
901 addr = &addrw->a;
902 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
903 af = sctp_get_af_specific(addr->sa.sa_family);
904 if (!af)
905 return -EINVAL;
906 if (sctp_verify_addr(sk, addr, af->sockaddr_len))
907 return -EINVAL;
908
909 if (addrw->state == SCTP_ADDR_NEW)
910 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
911 else
912 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
913 }
914
915 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
916 *
917 * API 8.1
918 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
919 * int flags);
920 *
921 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
922 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
923 * or IPv6 addresses.
924 *
925 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
926 * Section 3.1.2 for this usage.
927 *
928 * addrs is a pointer to an array of one or more socket addresses. Each
929 * address is contained in its appropriate structure (i.e. struct
930 * sockaddr_in or struct sockaddr_in6) the family of the address type
931 * must be used to distinguish the address length (note that this
932 * representation is termed a "packed array" of addresses). The caller
933 * specifies the number of addresses in the array with addrcnt.
934 *
935 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
936 * -1, and sets errno to the appropriate error code.
937 *
938 * For SCTP, the port given in each socket address must be the same, or
939 * sctp_bindx() will fail, setting errno to EINVAL.
940 *
941 * The flags parameter is formed from the bitwise OR of zero or more of
942 * the following currently defined flags:
943 *
944 * SCTP_BINDX_ADD_ADDR
945 *
946 * SCTP_BINDX_REM_ADDR
947 *
948 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
949 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
950 * addresses from the association. The two flags are mutually exclusive;
951 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
952 * not remove all addresses from an association; sctp_bindx() will
953 * reject such an attempt with EINVAL.
954 *
955 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
956 * additional addresses with an endpoint after calling bind(). Or use
957 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
958 * socket is associated with so that no new association accepted will be
959 * associated with those addresses. If the endpoint supports dynamic
960 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
961 * may cause the endpoint to send the appropriate message to the peer to
962 * change the peer's address lists.
963 *
964 * Adding and removing addresses from a connected association is
965 * optional functionality. Implementations that do not support this
966 * functionality should return EOPNOTSUPP.
967 *
968 * Basically do nothing but copying the addresses from user to kernel
969 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
970 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
971 * from userspace.
972 *
973 * We don't use copy_from_user() for optimization: we first do the
974 * sanity checks (buffer size -fast- and access check-healthy
975 * pointer); if all of those succeed, then we can alloc the memory
976 * (expensive operation) needed to copy the data to kernel. Then we do
977 * the copying without checking the user space area
978 * (__copy_from_user()).
979 *
980 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
981 * it.
982 *
983 * sk The sk of the socket
984 * addrs The pointer to the addresses in user land
985 * addrssize Size of the addrs buffer
986 * op Operation to perform (add or remove, see the flags of
987 * sctp_bindx)
988 *
989 * Returns 0 if ok, <0 errno code on error.
990 */
991 static int sctp_setsockopt_bindx(struct sock *sk,
992 struct sockaddr __user *addrs,
993 int addrs_size, int op)
994 {
995 struct sockaddr *kaddrs;
996 int err;
997 int addrcnt = 0;
998 int walk_size = 0;
999 struct sockaddr *sa_addr;
1000 void *addr_buf;
1001 struct sctp_af *af;
1002
1003 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
1004 __func__, sk, addrs, addrs_size, op);
1005
1006 if (unlikely(addrs_size <= 0))
1007 return -EINVAL;
1008
1009 /* Check the user passed a healthy pointer. */
1010 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
1011 return -EFAULT;
1012
1013 /* Alloc space for the address array in kernel memory. */
1014 kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
1015 if (unlikely(!kaddrs))
1016 return -ENOMEM;
1017
1018 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1019 kfree(kaddrs);
1020 return -EFAULT;
1021 }
1022
1023 /* Walk through the addrs buffer and count the number of addresses. */
1024 addr_buf = kaddrs;
1025 while (walk_size < addrs_size) {
1026 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1027 kfree(kaddrs);
1028 return -EINVAL;
1029 }
1030
1031 sa_addr = addr_buf;
1032 af = sctp_get_af_specific(sa_addr->sa_family);
1033
1034 /* If the address family is not supported or if this address
1035 * causes the address buffer to overflow return EINVAL.
1036 */
1037 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1038 kfree(kaddrs);
1039 return -EINVAL;
1040 }
1041 addrcnt++;
1042 addr_buf += af->sockaddr_len;
1043 walk_size += af->sockaddr_len;
1044 }
1045
1046 /* Do the work. */
1047 switch (op) {
1048 case SCTP_BINDX_ADD_ADDR:
1049 err = sctp_bindx_add(sk, kaddrs, addrcnt);
1050 if (err)
1051 goto out;
1052 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
1053 break;
1054
1055 case SCTP_BINDX_REM_ADDR:
1056 err = sctp_bindx_rem(sk, kaddrs, addrcnt);
1057 if (err)
1058 goto out;
1059 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
1060 break;
1061
1062 default:
1063 err = -EINVAL;
1064 break;
1065 }
1066
1067 out:
1068 kfree(kaddrs);
1069
1070 return err;
1071 }
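
/* Illustrative user-space sketch of the sctp_bindx() API documented
 * above (assumes the lksctp-tools wrapper, which tunnels the request
 * through this setsockopt handler). The addresses form a packed array,
 * exactly as walked by the loop above; both entries must carry the
 * same port:
 *
 *     struct sockaddr_in addrs[2] = {
 *             { .sin_family = AF_INET, .sin_port = htons(5000) },
 *             { .sin_family = AF_INET, .sin_port = htons(5000) },
 *     };
 *
 *     inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
 *     inet_pton(AF_INET, "192.0.2.2", &addrs[1].sin_addr);
 *
 *     if (sctp_bindx(sd, (struct sockaddr *)addrs, 2,
 *                    SCTP_BINDX_ADD_ADDR) < 0)
 *             perror("sctp_bindx");
 *
 * An array of sockaddr_in only works here because both addresses are
 * IPv4; mixed families must be packed back to back at their native
 * sockaddr lengths.
 */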
1072
1073 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
1074 *
1075 * Common routine for handling connect() and sctp_connectx().
1076 * Connect will come in with just a single address.
1077 */
1078 static int __sctp_connect(struct sock *sk,
1079 struct sockaddr *kaddrs,
1080 int addrs_size,
1081 sctp_assoc_t *assoc_id)
1082 {
1083 struct net *net = sock_net(sk);
1084 struct sctp_sock *sp;
1085 struct sctp_endpoint *ep;
1086 struct sctp_association *asoc = NULL;
1087 struct sctp_association *asoc2;
1088 struct sctp_transport *transport;
1089 union sctp_addr to;
1090 enum sctp_scope scope;
1091 long timeo;
1092 int err = 0;
1093 int addrcnt = 0;
1094 int walk_size = 0;
1095 union sctp_addr *sa_addr = NULL;
1096 void *addr_buf;
1097 unsigned short port;
1098 unsigned int f_flags = 0;
1099
1100 sp = sctp_sk(sk);
1101 ep = sp->ep;
1102
1103 /* connect() cannot be done on a socket that is already in ESTABLISHED
1104 * state - UDP-style peeled off socket or a TCP-style socket that
1105 * is already connected.
1106 * It cannot be done even on a TCP-style listening socket.
1107 */
1108 if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
1109 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
1110 err = -EISCONN;
1111 goto out_free;
1112 }
1113
1114 /* Walk through the addrs buffer and count the number of addresses. */
1115 addr_buf = kaddrs;
1116 while (walk_size < addrs_size) {
1117 struct sctp_af *af;
1118
1119 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1120 err = -EINVAL;
1121 goto out_free;
1122 }
1123
1124 sa_addr = addr_buf;
1125 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1126
1127 /* If the address family is not supported or if this address
1128 * causes the address buffer to overflow return EINVAL.
1129 */
1130 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1131 err = -EINVAL;
1132 goto out_free;
1133 }
1134
1135 port = ntohs(sa_addr->v4.sin_port);
1136
1137 /* Save current address so we can work with it */
1138 memcpy(&to, sa_addr, af->sockaddr_len);
1139
1140 err = sctp_verify_addr(sk, &to, af->sockaddr_len);
1141 if (err)
1142 goto out_free;
1143
1144 /* Make sure the destination port is correctly set
1145 * in all addresses.
1146 */
1147 if (asoc && asoc->peer.port && asoc->peer.port != port) {
1148 err = -EINVAL;
1149 goto out_free;
1150 }
1151
1152 /* Check if there already is a matching association on the
1153 * endpoint (other than the one created here).
1154 */
1155 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1156 if (asoc2 && asoc2 != asoc) {
1157 if (asoc2->state >= SCTP_STATE_ESTABLISHED)
1158 err = -EISCONN;
1159 else
1160 err = -EALREADY;
1161 goto out_free;
1162 }
1163
1164 /* If we could not find a matching association on the endpoint,
1165 * make sure that there is no peeled-off association matching
1166 * the peer address even on another socket.
1167 */
1168 if (sctp_endpoint_is_peeled_off(ep, &to)) {
1169 err = -EADDRNOTAVAIL;
1170 goto out_free;
1171 }
1172
1173 if (!asoc) {
1174 /* If a bind() or sctp_bindx() is not called prior to
1175 * an sctp_connectx() call, the system picks an
1176 * ephemeral port and will choose an address set
1177 * equivalent to binding with a wildcard address.
1178 */
1179 if (!ep->base.bind_addr.port) {
1180 if (sctp_autobind(sk)) {
1181 err = -EAGAIN;
1182 goto out_free;
1183 }
1184 } else {
1185 /*
1186 * If an unprivileged user inherits a 1-many
1187 * style socket with open associations on a
1188 * privileged port, it MAY be permitted to
1189 * accept new associations, but it SHOULD NOT
1190 * be permitted to open new associations.
1191 */
1192 if (ep->base.bind_addr.port <
1193 inet_prot_sock(net) &&
1194 !ns_capable(net->user_ns,
1195 CAP_NET_BIND_SERVICE)) {
1196 err = -EACCES;
1197 goto out_free;
1198 }
1199 }
1200
1201 scope = sctp_scope(&to);
1202 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1203 if (!asoc) {
1204 err = -ENOMEM;
1205 goto out_free;
1206 }
1207
1208 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
1209 GFP_KERNEL);
1210 if (err < 0) {
1211 goto out_free;
1212 }
1213
1214 }
1215
1216 /* Prime the peer's transport structures. */
1217 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
1218 SCTP_UNKNOWN);
1219 if (!transport) {
1220 err = -ENOMEM;
1221 goto out_free;
1222 }
1223
1224 addrcnt++;
1225 addr_buf += af->sockaddr_len;
1226 walk_size += af->sockaddr_len;
1227 }
1228
1229 /* In case the user of sctp_connectx() wants an association
1230 * id back, assign one now.
1231 */
1232 if (assoc_id) {
1233 err = sctp_assoc_set_id(asoc, GFP_KERNEL);
1234 if (err < 0)
1235 goto out_free;
1236 }
1237
1238 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1239 if (err < 0) {
1240 goto out_free;
1241 }
1242
1243 /* Initialize sk's dport and daddr for getpeername() */
1244 inet_sk(sk)->inet_dport = htons(asoc->peer.port);
1245 sp->pf->to_sk_daddr(sa_addr, sk);
1246 sk->sk_err = 0;
1247
1248 /* in-kernel sockets don't generally have a file allocated to them
1249 * if all they do is call sock_create_kern().
1250 */
1251 if (sk->sk_socket->file)
1252 f_flags = sk->sk_socket->file->f_flags;
1253
1254 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1255
1256 if (assoc_id)
1257 *assoc_id = asoc->assoc_id;
1258 err = sctp_wait_for_connect(asoc, &timeo);
1259 /* Note: the asoc may be freed after the return of
1260 * sctp_wait_for_connect.
1261 */
1262
1263 /* Don't free association on exit. */
1264 asoc = NULL;
1265
1266 out_free:
1267 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
1268 __func__, asoc, kaddrs, err);
1269
1270 if (asoc) {
1271 /* sctp_primitive_ASSOCIATE may have added this association
1272 * to the hash table; try to unhash it, just in case. It's a no-op
1273 * if it wasn't hashed, so we're safe.
1274 */
1275 sctp_association_free(asoc);
1276 }
1277 return err;
1278 }
1279
1280 /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1281 *
1282 * API 8.9
1283 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1284 * sctp_assoc_t *asoc);
1285 *
1286 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1287 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
1288 * or IPv6 addresses.
1289 *
1290 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
1291 * Section 3.1.2 for this usage.
1292 *
1293 * addrs is a pointer to an array of one or more socket addresses. Each
1294 * address is contained in its appropriate structure (i.e. struct
1295 * sockaddr_in or struct sockaddr_in6) the family of the address type
1296 * must be used to distinguish the address length (note that this
1297 * representation is termed a "packed array" of addresses). The caller
1298 * specifies the number of addresses in the array with addrcnt.
1299 *
1300 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1301 * the association id of the new association. On failure, sctp_connectx()
1302 * returns -1, and sets errno to the appropriate error code. The assoc_id
1303 * is not touched by the kernel.
1304 *
1305 * For SCTP, the port given in each socket address must be the same, or
1306 * sctp_connectx() will fail, setting errno to EINVAL.
1307 *
1308 * An application can use sctp_connectx to initiate an association with
1309 * an endpoint that is multi-homed. Much like sctp_bindx() this call
1310 * allows a caller to specify multiple addresses at which a peer can be
1311 * reached. The way the SCTP stack uses the list of addresses to set up
1312 * the association is implementation dependent. This function only
1313 * specifies that the stack will try to make use of all the addresses in
1314 * the list when needed.
1315 *
1316 * Note that the list of addresses passed in is only used for setting up
1317 * the association. It does not necessarily equal the set of addresses
1318 * the peer uses for the resulting association. If the caller wants to
1319 * find out the set of peer addresses, it must use sctp_getpaddrs() to
1320 * retrieve them after the association has been set up.
1321 *
1322 * Basically do nothing but copying the addresses from user to kernel
1323 * land and invoking either sctp_connectx(). This is used for tunneling
1324 * land and invoking __sctp_connect(). This is used for tunneling
1325 *
1326 * We don't use copy_from_user() for optimization: we first do the
1327 * sanity checks (buffer size -fast- and access check-healthy
1328 * pointer); if all of those succeed, then we can alloc the memory
1329 * (expensive operation) needed to copy the data to kernel. Then we do
1330 * the copying without checking the user space area
1331 * (__copy_from_user()).
1332 *
1333 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
1334 * it.
1335 *
1336 * sk The sk of the socket
1337 * addrs The pointer to the addresses in user land
1338 * addrssize Size of the addrs buffer
1339 *
1340 * Returns >=0 if ok, <0 errno code on error.
1341 */
1342 static int __sctp_setsockopt_connectx(struct sock *sk,
1343 struct sockaddr __user *addrs,
1344 int addrs_size,
1345 sctp_assoc_t *assoc_id)
1346 {
1347 struct sockaddr *kaddrs;
1348 gfp_t gfp = GFP_KERNEL;
1349 int err = 0;
1350
1351 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
1352 __func__, sk, addrs, addrs_size);
1353
1354 if (unlikely(addrs_size <= 0))
1355 return -EINVAL;
1356
1357 /* Check the user passed a healthy pointer. */
1358 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
1359 return -EFAULT;
1360
1361 /* Alloc space for the address array in kernel memory. */
1362 if (sk->sk_socket->file)
1363 gfp = GFP_USER | __GFP_NOWARN;
1364 kaddrs = kmalloc(addrs_size, gfp);
1365 if (unlikely(!kaddrs))
1366 return -ENOMEM;
1367
1368 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1369 err = -EFAULT;
1370 } else {
1371 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1372 }
1373
1374 kfree(kaddrs);
1375
1376 return err;
1377 }
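
/* Illustrative user-space sketch of the sctp_connectx() API documented
 * above (assumes the lksctp-tools wrapper; error handling trimmed).
 * Both addresses describe the same multi-homed peer and carry the same
 * port, as required above:
 *
 *     struct sockaddr_in peers[2] = {
 *             { .sin_family = AF_INET, .sin_port = htons(5000) },
 *             { .sin_family = AF_INET, .sin_port = htons(5000) },
 *     };
 *     sctp_assoc_t asoc_id;
 *
 *     inet_pton(AF_INET, "198.51.100.1", &peers[0].sin_addr);
 *     inet_pton(AF_INET, "198.51.100.2", &peers[1].sin_addr);
 *
 *     if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &asoc_id) < 0)
 *             perror("sctp_connectx");
 */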
1378
1379 /*
1380 * This is an older interface. It's kept for backward compatibility
1381 * to the option that doesn't provide association id.
1382 */
1383 static int sctp_setsockopt_connectx_old(struct sock *sk,
1384 struct sockaddr __user *addrs,
1385 int addrs_size)
1386 {
1387 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1388 }
1389
1390 /*
1391 * New interface for the API. The since the API is done with a socket
1392 * New interface for the API. Since the API is done with a socket
1393 * option, to make it simple we feed back the association id as a return
1394 * always positive.
1395 */
1396 static int sctp_setsockopt_connectx(struct sock *sk,
1397 struct sockaddr __user *addrs,
1398 int addrs_size)
1399 {
1400 sctp_assoc_t assoc_id = 0;
1401 int err = 0;
1402
1403 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1404
1405 if (err)
1406 return err;
1407 else
1408 return assoc_id;
1409 }
1410
1411 /*
1412 * New (hopefully final) interface for the API.
1413 * We use the sctp_getaddrs_old structure so that the user-space library
1414 * can avoid any unnecessary allocations. The only difference
1415 * is that we store the actual length of the address buffer in the
1416 * addr_num structure member. That way we can re-use the existing
1417 * code.
1418 */
1419 #ifdef CONFIG_COMPAT
1420 struct compat_sctp_getaddrs_old {
1421 sctp_assoc_t assoc_id;
1422 s32 addr_num;
1423 compat_uptr_t addrs; /* struct sockaddr * */
1424 };
1425 #endif
1426
1427 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1428 char __user *optval,
1429 int __user *optlen)
1430 {
1431 struct sctp_getaddrs_old param;
1432 sctp_assoc_t assoc_id = 0;
1433 int err = 0;
1434
1435 #ifdef CONFIG_COMPAT
1436 if (in_compat_syscall()) {
1437 struct compat_sctp_getaddrs_old param32;
1438
1439 if (len < sizeof(param32))
1440 return -EINVAL;
1441 if (copy_from_user(&param32, optval, sizeof(param32)))
1442 return -EFAULT;
1443
1444 param.assoc_id = param32.assoc_id;
1445 param.addr_num = param32.addr_num;
1446 param.addrs = compat_ptr(param32.addrs);
1447 } else
1448 #endif
1449 {
1450 if (len < sizeof(param))
1451 return -EINVAL;
1452 if (copy_from_user(&param, optval, sizeof(param)))
1453 return -EFAULT;
1454 }
1455
1456 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
1457 param.addrs, param.addr_num,
1458 &assoc_id);
1459 if (err == 0 || err == -EINPROGRESS) {
1460 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1461 return -EFAULT;
1462 if (put_user(sizeof(assoc_id), optlen))
1463 return -EFAULT;
1464 }
1465
1466 return err;
1467 }
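
/* A sketch of how a user-space wrapper might drive this handler (an
 * assumption for illustration; the real library code differs in its
 * details). addr_num carries the byte length of the packed address
 * buffer on input, and the new association id is copied back over the
 * start of the structure on success:
 *
 *     struct sctp_getaddrs_old param = {
 *             .assoc_id = 0,
 *             .addr_num = addrs_size,
 *             .addrs    = (struct sockaddr *)addrs,
 *     };
 *     socklen_t len = sizeof(param);
 *
 *     err = getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX3,
 *                      &param, &len);
 */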
1468
1469 /* API 3.1.4 close() - UDP Style Syntax
1470 * Applications use close() to perform graceful shutdown (as described in
1471 * Section 10.1 of [SCTP]) on ALL the associations currently represented
1472 * by a UDP-style socket.
1473 *
1474 * The syntax is
1475 *
1476 * ret = close(int sd);
1477 *
1478 * sd - the socket descriptor of the associations to be closed.
1479 *
1480 * To gracefully shutdown a specific association represented by the
1481 * UDP-style socket, an application should use the sendmsg() call,
1482 * passing no user data, but including the appropriate flag in the
1483 * ancillary data (see Section xxxx).
1484 *
1485 * If sd in the close() call is a branched-off socket representing only
1486 * one association, the shutdown is performed on that association only.
1487 *
1488 * 4.1.6 close() - TCP Style Syntax
1489 *
1490 * Applications use close() to gracefully close down an association.
1491 *
1492 * The syntax is:
1493 *
1494 * int close(int sd);
1495 *
1496 * sd - the socket descriptor of the association to be closed.
1497 *
1498 * After an application calls close() on a socket descriptor, no further
1499 * socket operations will succeed on that descriptor.
1500 *
1501 * API 7.1.4 SO_LINGER
1502 *
1503 * An application using the TCP-style socket can use this option to
1504 * perform the SCTP ABORT primitive. The linger option structure is:
1505 *
1506 * struct linger {
1507 * int l_onoff; // option on/off
1508 * int l_linger; // linger time
1509 * };
1510 *
1511 * To enable the option, set l_onoff to 1. If the l_linger value is set
1512 * to 0, calling close() is the same as the ABORT primitive. If the
1513 * value is set to a negative value, the setsockopt() call will return
1514 * an error. If the value is set to a positive value linger_time, the
1515 * close() can be blocked for at most linger_time ms. If the graceful
1516 * shutdown phase does not finish during this period, close() will
1517 * return but the graceful shutdown phase continues in the system.
1518 */
1519 static void sctp_close(struct sock *sk, long timeout)
1520 {
1521 struct net *net = sock_net(sk);
1522 struct sctp_endpoint *ep;
1523 struct sctp_association *asoc;
1524 struct list_head *pos, *temp;
1525 unsigned int data_was_unread;
1526
1527 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
1528
1529 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1530 sk->sk_shutdown = SHUTDOWN_MASK;
1531 sk->sk_state = SCTP_SS_CLOSING;
1532
1533 ep = sctp_sk(sk)->ep;
1534
1535 /* Clean up any skbs sitting on the receive queue. */
1536 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1537 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1538
1539 /* Walk all associations on an endpoint. */
1540 list_for_each_safe(pos, temp, &ep->asocs) {
1541 asoc = list_entry(pos, struct sctp_association, asocs);
1542
1543 if (sctp_style(sk, TCP)) {
1544 /* A closed association can still be in the list if
1545 * it belongs to a TCP-style listening socket that is
1546 * not yet accepted. If so, free it. If not, send an
1547 * ABORT or SHUTDOWN based on the linger options.
1548 */
1549 if (sctp_state(asoc, CLOSED)) {
1550 sctp_association_free(asoc);
1551 continue;
1552 }
1553 }
1554
1555 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
1556 !skb_queue_empty(&asoc->ulpq.reasm) ||
1557 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
1558 struct sctp_chunk *chunk;
1559
1560 chunk = sctp_make_abort_user(asoc, NULL, 0);
1561 sctp_primitive_ABORT(net, asoc, chunk);
1562 } else
1563 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1564 }
1565
1566 /* On a TCP-style socket, block for at most linger_time if set. */
1567 if (sctp_style(sk, TCP) && timeout)
1568 sctp_wait_for_close(sk, timeout);
1569
1570 /* This will run the backlog queue. */
1571 release_sock(sk);
1572
1573 /* Supposedly, no process has access to the socket, but
1574 * the net layers still may.
1575 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
1576 * held and that should be grabbed before socket lock.
1577 */
1578 spin_lock_bh(&net->sctp.addr_wq_lock);
1579 bh_lock_sock_nested(sk);
1580
1581 /* Hold the sock, since sk_common_release() will do a sock_put()
1582 * and we have just a little more cleanup.
1583 */
1584 sock_hold(sk);
1585 sk_common_release(sk);
1586
1587 bh_unlock_sock(sk);
1588 spin_unlock_bh(&net->sctp.addr_wq_lock);
1589
1590 sock_put(sk);
1591
1592 SCTP_DBG_OBJCNT_DEC(sock);
1593 }
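
/* Illustrative user-space sketch of the SO_LINGER behaviour described
 * in the comment above sctp_close(): with l_onoff set and l_linger 0,
 * close() takes the ABORT path above instead of a graceful SHUTDOWN:
 *
 *     struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *     setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *     close(sd);
 */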
1594
1595 /* Handle EPIPE error. */
1596 static int sctp_error(struct sock *sk, int flags, int err)
1597 {
1598 if (err == -EPIPE)
1599 err = sock_error(sk) ? : -EPIPE;
1600 if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
1601 send_sig(SIGPIPE, current, 0);
1602 return err;
1603 }
1604
1605 /* API 3.1.3 sendmsg() - UDP Style Syntax
1606 *
1607 * An application uses sendmsg() and recvmsg() calls to transmit data to
1608 * and receive data from its peer.
1609 *
1610 * ssize_t sendmsg(int socket, const struct msghdr *message,
1611 * int flags);
1612 *
1613 * socket - the socket descriptor of the endpoint.
1614 * message - pointer to the msghdr structure which contains a single
1615 * user message and possibly some ancillary data.
1616 *
1617 * See Section 5 for complete description of the data
1618 * structures.
1619 *
1620 * flags - flags sent or received with the user message, see Section
1621 * 5 for complete description of the flags.
1622 *
1623 * Note: This function could use a rewrite especially when explicit
1624 * connect support comes in.
1625 */
1626 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
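
/* Illustrative user-space sketch of the UDP-style sendmsg() usage
 * documented above (assumes the lksctp-tools sctp_sendmsg() wrapper,
 * which conveys the stream number and flags in ancillary data parsed by
 * sctp_msghdr_parse() below). sd and peer are the caller's one-to-many
 * socket and destination address; the trailing arguments are ppid,
 * flags, stream number, time-to-live and context:
 *
 *     const char data[] = "hello";
 *
 *     if (sctp_sendmsg(sd, data, sizeof(data) - 1,
 *                      (struct sockaddr *)&peer, sizeof(peer),
 *                      0, 0, 1, 0, 0) < 0)
 *             perror("sctp_sendmsg");
 */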
1627
1628 static int sctp_msghdr_parse(const struct msghdr *msg,
1629 struct sctp_cmsgs *cmsgs);
1630
1631 static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1632 {
1633 struct net *net = sock_net(sk);
1634 struct sctp_sock *sp;
1635 struct sctp_endpoint *ep;
1636 struct sctp_association *new_asoc = NULL, *asoc = NULL;
1637 struct sctp_transport *transport, *chunk_tp;
1638 struct sctp_chunk *chunk;
1639 union sctp_addr to;
1640 struct sockaddr *msg_name = NULL;
1641 struct sctp_sndrcvinfo default_sinfo;
1642 struct sctp_sndrcvinfo *sinfo;
1643 struct sctp_initmsg *sinit;
1644 sctp_assoc_t associd = 0;
1645 struct sctp_cmsgs cmsgs = { NULL };
1646 enum sctp_scope scope;
1647 bool fill_sinfo_ttl = false, wait_connect = false;
1648 struct sctp_datamsg *datamsg;
1649 int msg_flags = msg->msg_flags;
1650 __u16 sinfo_flags = 0;
1651 long timeo;
1652 int err;
1653
1654 err = 0;
1655 sp = sctp_sk(sk);
1656 ep = sp->ep;
1657
1658 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
1659 msg, msg_len, ep);
1660
1661 /* We cannot send a message over a TCP-style listening socket. */
1662 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
1663 err = -EPIPE;
1664 goto out_nounlock;
1665 }
1666
1667 /* Parse out the SCTP CMSGs. */
1668 err = sctp_msghdr_parse(msg, &cmsgs);
1669 if (err) {
1670 pr_debug("%s: msghdr parse err:%x\n", __func__, err);
1671 goto out_nounlock;
1672 }
1673
1674 /* Fetch the destination address for this packet. This
1675 * address only selects the association--it is not necessarily
1676 * the address we will send to.
1677 * For a peeled-off socket, msg_name is ignored.
1678 */
1679 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
1680 int msg_namelen = msg->msg_namelen;
1681
1682 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
1683 msg_namelen);
1684 if (err)
1685 return err;
1686
1687 if (msg_namelen > sizeof(to))
1688 msg_namelen = sizeof(to);
1689 memcpy(&to, msg->msg_name, msg_namelen);
1690 msg_name = msg->msg_name;
1691 }
1692
1693 sinit = cmsgs.init;
1694 if (cmsgs.sinfo != NULL) {
1695 memset(&default_sinfo, 0, sizeof(default_sinfo));
1696 default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
1697 default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
1698 default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
1699 default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
1700 default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;
1701
1702 sinfo = &default_sinfo;
1703 fill_sinfo_ttl = true;
1704 } else {
1705 sinfo = cmsgs.srinfo;
1706 }
1707 /* Did the user specify SNDINFO/SNDRCVINFO? */
1708 if (sinfo) {
1709 sinfo_flags = sinfo->sinfo_flags;
1710 associd = sinfo->sinfo_assoc_id;
1711 }
1712
1713 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
1714 msg_len, sinfo_flags);
1715
1716 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
1717 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
1718 err = -EINVAL;
1719 goto out_nounlock;
1720 }
1721
1722 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero
1723 * length messages when SCTP_EOF|SCTP_ABORT is not set.
1724 * If SCTP_ABORT is set, the message length could be non zero with
1725 * the msg_iov set to the user abort reason.
1726 */
1727 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
1728 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
1729 err = -EINVAL;
1730 goto out_nounlock;
1731 }
1732
1733 /* If SCTP_ADDR_OVER is set, there must be an address
1734 * specified in msg_name.
1735 */
1736 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
1737 err = -EINVAL;
1738 goto out_nounlock;
1739 }
1740
1741 transport = NULL;
1742
1743 pr_debug("%s: about to look up association\n", __func__);
1744
1745 lock_sock(sk);
1746
1747 /* If a msg_name has been specified, assume this is to be used. */
1748 if (msg_name) {
1749 /* Look for a matching association on the endpoint. */
1750 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1751
1752 /* If we could not find a matching association on the
1753 * endpoint, make sure that it is not a TCP-style
1754 * socket that already has an association or there is
1755 * no peeled-off association on another socket.
1756 */
1757 if (!asoc &&
1758 ((sctp_style(sk, TCP) &&
1759 (sctp_sstate(sk, ESTABLISHED) ||
1760 sctp_sstate(sk, CLOSING))) ||
1761 sctp_endpoint_is_peeled_off(ep, &to))) {
1762 err = -EADDRNOTAVAIL;
1763 goto out_unlock;
1764 }
1765 } else {
1766 asoc = sctp_id2assoc(sk, associd);
1767 if (!asoc) {
1768 err = -EPIPE;
1769 goto out_unlock;
1770 }
1771 }
1772
1773 if (asoc) {
1774 pr_debug("%s: just looked up association:%p\n", __func__, asoc);
1775
1776 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
1777 * socket that has an association in CLOSED state. This can
1778 * happen when an accepted socket has an association that is
1779 * already CLOSED.
1780 */
1781 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
1782 err = -EPIPE;
1783 goto out_unlock;
1784 }
1785
1786 if (sinfo_flags & SCTP_EOF) {
1787 pr_debug("%s: shutting down association:%p\n",
1788 __func__, asoc);
1789
1790 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1791 err = 0;
1792 goto out_unlock;
1793 }
1794 if (sinfo_flags & SCTP_ABORT) {
1795
1796 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1797 if (!chunk) {
1798 err = -ENOMEM;
1799 goto out_unlock;
1800 }
1801
1802 pr_debug("%s: aborting association:%p\n",
1803 __func__, asoc);
1804
1805 sctp_primitive_ABORT(net, asoc, chunk);
1806 err = 0;
1807 goto out_unlock;
1808 }
1809 }
1810
1811 /* Do we need to create the association? */
1812 if (!asoc) {
1813 pr_debug("%s: there is no association yet\n", __func__);
1814
1815 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
1816 err = -EINVAL;
1817 goto out_unlock;
1818 }
1819
1820 /* Check for invalid stream against the stream counts,
1821 * either the default or the user specified stream counts.
1822 */
1823 if (sinfo) {
1824 if (!sinit || !sinit->sinit_num_ostreams) {
1825 /* Check against the defaults. */
1826 if (sinfo->sinfo_stream >=
1827 sp->initmsg.sinit_num_ostreams) {
1828 err = -EINVAL;
1829 goto out_unlock;
1830 }
1831 } else {
1832 /* Check against the requested. */
1833 if (sinfo->sinfo_stream >=
1834 sinit->sinit_num_ostreams) {
1835 err = -EINVAL;
1836 goto out_unlock;
1837 }
1838 }
1839 }
1840
1841 /*
1842 * API 3.1.2 bind() - UDP Style Syntax
1843 * If a bind() or sctp_bindx() is not called prior to a
1844 * sendmsg() call that initiates a new association, the
1845 * system picks an ephemeral port and will choose an address
1846 * set equivalent to binding with a wildcard address.
1847 */
1848 if (!ep->base.bind_addr.port) {
1849 if (sctp_autobind(sk)) {
1850 err = -EAGAIN;
1851 goto out_unlock;
1852 }
1853 } else {
1854 /*
1855 * If an unprivileged user inherits a one-to-many
1856 * style socket with open associations on a privileged
1857 * port, it MAY be permitted to accept new associations,
1858 * but it SHOULD NOT be permitted to open new
1859 * associations.
1860 */
1861 if (ep->base.bind_addr.port < inet_prot_sock(net) &&
1862 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1863 err = -EACCES;
1864 goto out_unlock;
1865 }
1866 }
1867
1868 scope = sctp_scope(&to);
1869 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1870 if (!new_asoc) {
1871 err = -ENOMEM;
1872 goto out_unlock;
1873 }
1874 asoc = new_asoc;
1875 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1876 if (err < 0) {
1877 err = -ENOMEM;
1878 goto out_free;
1879 }
1880
1881 /* If the SCTP_INIT ancillary data is specified, set all
1882 * the association init values accordingly.
1883 */
1884 if (sinit) {
1885 if (sinit->sinit_num_ostreams) {
1886 __u16 outcnt = sinit->sinit_num_ostreams;
1887
1888 asoc->c.sinit_num_ostreams = outcnt;
1889 /* outcnt has been changed, so re-init stream */
1890 err = sctp_stream_init(&asoc->stream, outcnt, 0,
1891 GFP_KERNEL);
1892 if (err)
1893 goto out_free;
1894 }
1895 if (sinit->sinit_max_instreams) {
1896 asoc->c.sinit_max_instreams =
1897 sinit->sinit_max_instreams;
1898 }
1899 if (sinit->sinit_max_attempts) {
1900 asoc->max_init_attempts
1901 = sinit->sinit_max_attempts;
1902 }
1903 if (sinit->sinit_max_init_timeo) {
1904 asoc->max_init_timeo =
1905 msecs_to_jiffies(sinit->sinit_max_init_timeo);
1906 }
1907 }
1908
1909 /* Prime the peer's transport structures. */
1910 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
1911 if (!transport) {
1912 err = -ENOMEM;
1913 goto out_free;
1914 }
1915 }
1916
1917 /* ASSERT: we have a valid association at this point. */
1918 pr_debug("%s: we have a valid association\n", __func__);
1919
1920 if (!sinfo) {
1921 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up
1922 * one with some defaults.
1923 */
1924 memset(&default_sinfo, 0, sizeof(default_sinfo));
1925 default_sinfo.sinfo_stream = asoc->default_stream;
1926 default_sinfo.sinfo_flags = asoc->default_flags;
1927 default_sinfo.sinfo_ppid = asoc->default_ppid;
1928 default_sinfo.sinfo_context = asoc->default_context;
1929 default_sinfo.sinfo_timetolive = asoc->default_timetolive;
1930 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
1931
1932 sinfo = &default_sinfo;
1933 } else if (fill_sinfo_ttl) {
1934 /* In case SNDINFO was specified, we still need to fill
1935 * it with a default ttl from the assoc here.
1936 */
1937 sinfo->sinfo_timetolive = asoc->default_timetolive;
1938 }
1939
1940 /* API 7.1.7, the sndbuf size per association bounds the
1941 * maximum size of data that can be sent in a single send call.
1942 */
1943 if (msg_len > sk->sk_sndbuf) {
1944 err = -EMSGSIZE;
1945 goto out_free;
1946 }
1947
1948 if (asoc->pmtu_pending)
1949 sctp_assoc_pending_pmtu(asoc);
1950
1951 /* If fragmentation is disabled and the message length exceeds the
1952 * association fragmentation point, return EMSGSIZE. The I-D
1953 * does not specify what this error is, but this looks like
1954 * a great fit.
1955 */
1956 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
1957 err = -EMSGSIZE;
1958 goto out_free;
1959 }
1960
1961 /* Check for invalid stream. */
1962 if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
1963 err = -EINVAL;
1964 goto out_free;
1965 }
1966
1967 if (sctp_wspace(asoc) < msg_len)
1968 sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
1969
1970 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1971 if (!sctp_wspace(asoc)) {
1972 /* sk can be changed by peel off when waiting for buf. */
1973 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1974 if (err) {
1975 if (err == -ESRCH) {
1976 /* asoc is already dead. */
1977 new_asoc = NULL;
1978 err = -EPIPE;
1979 }
1980 goto out_free;
1981 }
1982 }
1983
1984 /* If an address is passed with the sendto/sendmsg call, it is used
1985 * to override the primary destination address in the TCP model, or
1986 * when SCTP_ADDR_OVER flag is set in the UDP model.
1987 */
1988 if ((sctp_style(sk, TCP) && msg_name) ||
1989 (sinfo_flags & SCTP_ADDR_OVER)) {
1990 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
1991 if (!chunk_tp) {
1992 err = -EINVAL;
1993 goto out_free;
1994 }
1995 } else
1996 chunk_tp = NULL;
1997
1998 /* Auto-connect, if we aren't connected already. */
1999 if (sctp_state(asoc, CLOSED)) {
2000 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
2001 if (err < 0)
2002 goto out_free;
2003
2004 wait_connect = true;
2005 pr_debug("%s: we associated primitively\n", __func__);
2006 }
2007
2008 /* Break the message into multiple chunks of maximum size. */
2009 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
2010 if (IS_ERR(datamsg)) {
2011 err = PTR_ERR(datamsg);
2012 goto out_free;
2013 }
2014 asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
2015
2016 /* Now send the (possibly) fragmented message. */
2017 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
2018 sctp_chunk_hold(chunk);
2019
2020 /* Do accounting for the write space. */
2021 sctp_set_owner_w(chunk);
2022
2023 chunk->transport = chunk_tp;
2024 }
2025
2026 /* Send it to the lower layers. Note: all chunks
2027 * must either fail or succeed. The lower layer
2028 * works that way today. Keep it that way or this
2029 * breaks.
2030 */
2031 err = sctp_primitive_SEND(net, asoc, datamsg);
2032 /* Did the lower layer accept the chunk? */
2033 if (err) {
2034 sctp_datamsg_free(datamsg);
2035 goto out_free;
2036 }
2037
2038 pr_debug("%s: we sent primitively\n", __func__);
2039
2040 sctp_datamsg_put(datamsg);
2041 err = msg_len;
2042
2043 if (unlikely(wait_connect)) {
2044 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
2045 sctp_wait_for_connect(asoc, &timeo);
2046 }
2047
2048 /* If we are already past ASSOCIATE, the lower
2049 * layers are responsible for association cleanup.
2050 */
2051 goto out_unlock;
2052
2053 out_free:
2054 if (new_asoc)
2055 sctp_association_free(asoc);
2056 out_unlock:
2057 release_sock(sk);
2058
2059 out_nounlock:
2060 return sctp_error(sk, msg_flags, err);
2061
2062 #if 0
2063 do_sock_err:
2064 if (msg_len)
2065 err = msg_len;
2066 else
2067 err = sock_error(sk);
2068 goto out;
2069
2070 do_interrupted:
2071 if (msg_len)
2072 err = msg_len;
2073 goto out;
2074 #endif /* 0 */
2075 }
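
/* Example (user space): a minimal sketch of driving the send path above,
 * assuming the usual <sys/socket.h>/<netinet/sctp.h> headers, an already
 * bound one-to-many (SOCK_SEQPACKET) socket "fd", and a filled-in peer
 * address "peer" (both are assumptions of this sketch). The SCTP_SNDRCV
 * ancillary data carries the sctp_sndrcvinfo this function consumes;
 * setting SCTP_EOF in sinfo_flags with no payload would instead request a
 * graceful SHUTDOWN of the selected association.
 *
 *	struct sctp_sndrcvinfo sinfo = { .sinfo_stream = 1 };
 *	char data[] = "hello", cbuf[CMSG_SPACE(sizeof(sinfo))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) - 1 };
 *	struct msghdr mh = {
 *		.msg_name = &peer, .msg_namelen = sizeof(peer),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&mh);
 *
 *	memset(cbuf, 0, sizeof(cbuf));
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDRCV;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(sinfo));
 *	memcpy(CMSG_DATA(cmsg), &sinfo, sizeof(sinfo));
 *	if (sendmsg(fd, &mh, 0) < 0)
 *		perror("sendmsg");
 */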
2076
2077 /* This is an extended version of skb_pull() that removes the data from the
2078 * start of an skb even when the data is spread across the list of skbs in the
2079 * frag_list. len specifies the total amount of data that needs to be removed.
2080 * When 'len' bytes could be removed from the skb, it returns 0.
2081 * If 'len' exceeds the total skb length, it returns the number of bytes that
2082 * could not be removed.
2083 */
2084 static int sctp_skb_pull(struct sk_buff *skb, int len)
2085 {
2086 struct sk_buff *list;
2087 int skb_len = skb_headlen(skb);
2088 int rlen;
2089
2090 if (len <= skb_len) {
2091 __skb_pull(skb, len);
2092 return 0;
2093 }
2094 len -= skb_len;
2095 __skb_pull(skb, skb_len);
2096
2097 skb_walk_frags(skb, list) {
2098 rlen = sctp_skb_pull(list, len);
2099 skb->len -= (len-rlen);
2100 skb->data_len -= (len-rlen);
2101
2102 if (!rlen)
2103 return 0;
2104
2105 len = rlen;
2106 }
2107
2108 return len;
2109 }
2110
2111 /* API 3.1.3 recvmsg() - UDP Style Syntax
2112 *
2113 * ssize_t recvmsg(int socket, struct msghdr *message,
2114 * int flags);
2115 *
2116 * socket - the socket descriptor of the endpoint.
2117 * message - pointer to the msghdr structure which contains a single
2118 * user message and possibly some ancillary data.
2119 *
2120 * See Section 5 for complete description of the data
2121 * structures.
2122 *
2123 * flags - flags sent or received with the user message, see Section
2124 * 5 for complete description of the flags.
2125 */
2126 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2127 int noblock, int flags, int *addr_len)
2128 {
2129 struct sctp_ulpevent *event = NULL;
2130 struct sctp_sock *sp = sctp_sk(sk);
2131 struct sk_buff *skb, *head_skb;
2132 int copied;
2133 int err = 0;
2134 int skb_len;
2135
2136 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
2137 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
2138 addr_len);
2139
2140 lock_sock(sk);
2141
2142 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
2143 !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
2144 err = -ENOTCONN;
2145 goto out;
2146 }
2147
2148 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
2149 if (!skb)
2150 goto out;
2151
2152 /* Get the total length of the skb including any skb's in the
2153 * frag_list.
2154 */
2155 skb_len = skb->len;
2156
2157 copied = skb_len;
2158 if (copied > len)
2159 copied = len;
2160
2161 err = skb_copy_datagram_msg(skb, 0, msg, copied);
2162
2163 event = sctp_skb2event(skb);
2164
2165 if (err)
2166 goto out_free;
2167
2168 if (event->chunk && event->chunk->head_skb)
2169 head_skb = event->chunk->head_skb;
2170 else
2171 head_skb = skb;
2172 sock_recv_ts_and_drops(msg, sk, head_skb);
2173 if (sctp_ulpevent_is_notification(event)) {
2174 msg->msg_flags |= MSG_NOTIFICATION;
2175 sp->pf->event_msgname(event, msg->msg_name, addr_len);
2176 } else {
2177 sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
2178 }
2179
2180 /* Check if we allow SCTP_NXTINFO. */
2181 if (sp->recvnxtinfo)
2182 sctp_ulpevent_read_nxtinfo(event, msg, sk);
2183 /* Check if we allow SCTP_RCVINFO. */
2184 if (sp->recvrcvinfo)
2185 sctp_ulpevent_read_rcvinfo(event, msg);
2186 /* Check if we allow SCTP_SNDRCVINFO. */
2187 if (sp->subscribe.sctp_data_io_event)
2188 sctp_ulpevent_read_sndrcvinfo(event, msg);
2189
2190 err = copied;
2191
2192 /* If skb's length exceeds the user's buffer, update the skb and
2193 * push it back to the receive_queue so that the next call to
2194 * recvmsg() will return the remaining data. Don't set MSG_EOR.
2195 */
2196 if (skb_len > copied) {
2197 msg->msg_flags &= ~MSG_EOR;
2198 if (flags & MSG_PEEK)
2199 goto out_free;
2200 sctp_skb_pull(skb, copied);
2201 skb_queue_head(&sk->sk_receive_queue, skb);
2202
2203 /* When only partial message is copied to the user, increase
2204 * rwnd by that amount. If all the data in the skb is read,
2205 * rwnd is updated when the event is freed.
2206 */
2207 if (!sctp_ulpevent_is_notification(event))
2208 sctp_assoc_rwnd_increase(event->asoc, copied);
2209 goto out;
2210 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2211 (event->msg_flags & MSG_EOR))
2212 msg->msg_flags |= MSG_EOR;
2213 else
2214 msg->msg_flags &= ~MSG_EOR;
2215
2216 out_free:
2217 if (flags & MSG_PEEK) {
2218 /* Release the skb reference acquired after peeking the skb in
2219 * sctp_skb_recv_datagram().
2220 */
2221 kfree_skb(skb);
2222 } else {
2223 /* Free the event which includes releasing the reference to
2224 * the owner of the skb, freeing the skb and updating the
2225 * rwnd.
2226 */
2227 sctp_ulpevent_free(event);
2228 }
2229 out:
2230 release_sock(sk);
2231 return err;
2232 }
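
/* Example (user space): a hedged sketch of the receive side serviced by
 * sctp_recvmsg() above, assuming a socket "fd" and two hypothetical
 * helpers, handle_notification() and read_remainder(). MSG_NOTIFICATION
 * marks an event rather than user data, and a cleared MSG_EOR means the
 * buffer was too small and the rest of the message comes back on the next
 * call, matching the partial-read path above that re-queues the skb.
 *
 *	struct sockaddr_storage from;
 *	char buf[2048];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = {
 *		.msg_name = &from, .msg_namelen = sizeof(from),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *	};
 *	ssize_t n = recvmsg(fd, &mh, 0);
 *
 *	if (n > 0 && (mh.msg_flags & MSG_NOTIFICATION))
 *		handle_notification(buf, n);
 *	else if (n > 0 && !(mh.msg_flags & MSG_EOR))
 *		read_remainder(fd);
 *
 * (handle_notification() and read_remainder() are placeholders, not part
 * of any real API.)
 */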
2233
2234 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
2235 *
2236 * This option is an on/off flag. If enabled, no SCTP message
2237 * fragmentation will be performed. Instead, if a message being sent
2238 * exceeds the current PMTU size, the message will NOT be sent and
2239 * an error will be indicated to the user.
2240 */
2241 static int sctp_setsockopt_disable_fragments(struct sock *sk,
2242 char __user *optval,
2243 unsigned int optlen)
2244 {
2245 int val;
2246
2247 if (optlen < sizeof(int))
2248 return -EINVAL;
2249
2250 if (get_user(val, (int __user *)optval))
2251 return -EFAULT;
2252
2253 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
2254
2255 return 0;
2256 }
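
/* Example (user space): a minimal sketch, assuming an SCTP socket "fd".
 * With fragmentation disabled, a send larger than the association's
 * frag_point fails with EMSGSIZE instead of being split into several
 * DATA chunks, as enforced in sctp_sendmsg() above.
 *
 *	int on = 1;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *		       &on, sizeof(on)) < 0)
 *		perror("SCTP_DISABLE_FRAGMENTS");
 */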
2257
2258 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2259 unsigned int optlen)
2260 {
2261 struct sctp_association *asoc;
2262 struct sctp_ulpevent *event;
2263
2264 if (optlen > sizeof(struct sctp_event_subscribe))
2265 return -EINVAL;
2266 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2267 return -EFAULT;
2268
2269 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2270 * if there is no data to be sent or retransmitted, the stack will
2271 * immediately send up this notification.
2272 */
2273 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
2274 &sctp_sk(sk)->subscribe)) {
2275 asoc = sctp_id2assoc(sk, 0);
2276
2277 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2278 event = sctp_ulpevent_make_sender_dry_event(asoc,
2279 GFP_ATOMIC);
2280 if (!event)
2281 return -ENOMEM;
2282
2283 sctp_ulpq_tail_event(&asoc->ulpq, event);
2284 }
2285 }
2286
2287 return 0;
2288 }
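
/* Example (user space): a hedged sketch of subscribing to events with
 * SCTP_EVENTS, assuming a socket "fd". Enabling sctp_data_io_event makes
 * the stack attach SCTP_SNDRCV ancillary data to received messages, and
 * the SCTP_SENDER_DRY_EVENT behaviour described above applies once that
 * bit of the subscription is turned on.
 *
 *	struct sctp_event_subscribe ev = { 0 };
 *
 *	ev.sctp_data_io_event = 1;
 *	ev.sctp_association_event = 1;
 *	ev.sctp_sender_dry_event = 1;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev)) < 0)
 *		perror("SCTP_EVENTS");
 */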
2289
2290 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
2291 *
2292 * This socket option is applicable to the UDP-style socket only. When
2293 * set, it will cause associations that are idle for more than the
2294 * specified number of seconds to automatically close. An association
2295 * being idle is defined as an association that has NOT sent or received
2296 * user data. The special value of '0' indicates that no automatic
2297 * close of any associations should be performed. The option expects an
2298 * integer defining the number of seconds of idle time before an
2299 * association is closed.
2300 */
2301 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2302 unsigned int optlen)
2303 {
2304 struct sctp_sock *sp = sctp_sk(sk);
2305 struct net *net = sock_net(sk);
2306
2307 /* Applicable to UDP-style socket only */
2308 if (sctp_style(sk, TCP))
2309 return -EOPNOTSUPP;
2310 if (optlen != sizeof(int))
2311 return -EINVAL;
2312 if (copy_from_user(&sp->autoclose, optval, optlen))
2313 return -EFAULT;
2314
2315 if (sp->autoclose > net->sctp.max_autoclose)
2316 sp->autoclose = net->sctp.max_autoclose;
2317
2318 return 0;
2319 }
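
/* Example (user space): a minimal sketch for a one-to-many
 * (SOCK_SEQPACKET) socket "fd". Associations idle for the given number of
 * seconds are shut down automatically; the value is clamped to the
 * net.sctp.max_autoclose sysctl above, and 0 disables the feature.
 *
 *	int seconds = 30;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *		       &seconds, sizeof(seconds)) < 0)
 *		perror("SCTP_AUTOCLOSE");
 */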
2320
2321 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
2322 *
2323 * Applications can enable or disable heartbeats for any peer address of
2324 * an association, modify an address's heartbeat interval, force a
2325 * heartbeat to be sent immediately, and adjust the address's maximum
2326 * number of retransmissions sent before an address is considered
2327 * unreachable. The following structure is used to access and modify an
2328 * address's parameters:
2329 *
2330 * struct sctp_paddrparams {
2331 * sctp_assoc_t spp_assoc_id;
2332 * struct sockaddr_storage spp_address;
2333 * uint32_t spp_hbinterval;
2334 * uint16_t spp_pathmaxrxt;
2335 * uint32_t spp_pathmtu;
2336 * uint32_t spp_sackdelay;
2337 * uint32_t spp_flags;
2338 * };
2339 *
2340 * spp_assoc_id - (one-to-many style socket) This is filled in by the
2341 * application, and identifies the association for
2342 * this query.
2343 * spp_address - This specifies which address is of interest.
2344 * spp_hbinterval - This contains the value of the heartbeat interval,
2345 * in milliseconds. If a value of zero
2346 * is present in this field then no changes are to
2347 * be made to this parameter.
2348 * spp_pathmaxrxt - This contains the maximum number of
2349 * retransmissions before this address shall be
2350 * considered unreachable. If a value of zero
2351 * is present in this field then no changes are to
2352 * be made to this parameter.
2353 * spp_pathmtu - When Path MTU discovery is disabled the value
2354 * specified here will be the "fixed" path mtu.
2355 * Note that if the spp_address field is empty
2356 * then all associations on this address will
2357 * have this fixed path mtu set upon them.
2358 *
2359 * spp_sackdelay - When delayed sack is enabled, this value specifies
2360 * the number of milliseconds that sacks will be delayed
2361 * for. This value will apply to all addresses of an
2362 * association if the spp_address field is empty. Note
2363 * also, that if delayed sack is enabled and this
2364 * value is set to 0, no change is made to the last
2365 * recorded delayed sack timer value.
2366 *
2367 * spp_flags - These flags are used to control various features
2368 * on an association. The flag field may contain
2369 * zero or more of the following options.
2370 *
2371 * SPP_HB_ENABLE - Enable heartbeats on the
2372 * specified address. Note that if the address
2373 * field is empty all addresses for the association
2374 * have heartbeats enabled upon them.
2375 *
2376 * SPP_HB_DISABLE - Disable heartbeats on the
2377 * specified address. Note that if the address
2378 * field is empty all addresses for the association
2379 * will have their heartbeats disabled. Note also
2380 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
2381 * mutually exclusive, only one of these two should
2382 * be specified. Enabling both fields will have
2383 * undetermined results.
2384 *
2385 * SPP_HB_DEMAND - Request a user initiated heartbeat
2386 * to be made immediately.
2387 *
2388 * SPP_HB_TIME_IS_ZERO - Specifies that the time for
2389 * heartbeat delay is to be set to the value of 0
2390 * milliseconds.
2391 *
2392 * SPP_PMTUD_ENABLE - This field will enable PMTU
2393 * discovery upon the specified address. Note that
2394 * if the address field is empty then all addresses
2395 * on the association are affected.
2396 *
2397 * SPP_PMTUD_DISABLE - This field will disable PMTU
2398 * discovery upon the specified address. Note that
2399 * if the address field is empty then all addresses
2400 * on the association are affected. Note also that
2401 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
2402 * exclusive. Enabling both will have undetermined
2403 * results.
2404 *
2405 * SPP_SACKDELAY_ENABLE - Setting this flag turns
2406 * on delayed sack. The time specified in spp_sackdelay
2407 * is used to specify the sack delay for this address. Note
2408 * that if spp_address is empty then all addresses will
2409 * enable delayed sack and take on the sack delay
2410 * value specified in spp_sackdelay.
2411 * SPP_SACKDELAY_DISABLE - Setting this flag turns
2412 * off delayed sack. If the spp_address field is blank then
2413 * delayed sack is disabled for the entire association. Note
2414 * also that this field is mutually exclusive to
2415 * SPP_SACKDELAY_ENABLE, setting both will have undefined
2416 * results.
2417 */
2418 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2419 struct sctp_transport *trans,
2420 struct sctp_association *asoc,
2421 struct sctp_sock *sp,
2422 int hb_change,
2423 int pmtud_change,
2424 int sackdelay_change)
2425 {
2426 int error;
2427
2428 if (params->spp_flags & SPP_HB_DEMAND && trans) {
2429 struct net *net = sock_net(trans->asoc->base.sk);
2430
2431 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
2432 if (error)
2433 return error;
2434 }
2435
2436 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
2437 * this field is ignored. Note also that a value of zero indicates
2438 * the current setting should be left unchanged.
2439 */
2440 if (params->spp_flags & SPP_HB_ENABLE) {
2441
2442 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
2443 * set. This lets us use 0 value when this flag
2444 * is set.
2445 */
2446 if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
2447 params->spp_hbinterval = 0;
2448
2449 if (params->spp_hbinterval ||
2450 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
2451 if (trans) {
2452 trans->hbinterval =
2453 msecs_to_jiffies(params->spp_hbinterval);
2454 } else if (asoc) {
2455 asoc->hbinterval =
2456 msecs_to_jiffies(params->spp_hbinterval);
2457 } else {
2458 sp->hbinterval = params->spp_hbinterval;
2459 }
2460 }
2461 }
2462
2463 if (hb_change) {
2464 if (trans) {
2465 trans->param_flags =
2466 (trans->param_flags & ~SPP_HB) | hb_change;
2467 } else if (asoc) {
2468 asoc->param_flags =
2469 (asoc->param_flags & ~SPP_HB) | hb_change;
2470 } else {
2471 sp->param_flags =
2472 (sp->param_flags & ~SPP_HB) | hb_change;
2473 }
2474 }
2475
2476 /* When Path MTU discovery is disabled the value specified here will
2477 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
2478 * include the flag SPP_PMTUD_DISABLE for this field to have any
2479 * effect).
2480 */
2481 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2482 if (trans) {
2483 trans->pathmtu = params->spp_pathmtu;
2484 sctp_assoc_sync_pmtu(asoc);
2485 } else if (asoc) {
2486 asoc->pathmtu = params->spp_pathmtu;
2487 } else {
2488 sp->pathmtu = params->spp_pathmtu;
2489 }
2490 }
2491
2492 if (pmtud_change) {
2493 if (trans) {
2494 int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
2495 (params->spp_flags & SPP_PMTUD_ENABLE);
2496 trans->param_flags =
2497 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2498 if (update) {
2499 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2500 sctp_assoc_sync_pmtu(asoc);
2501 }
2502 } else if (asoc) {
2503 asoc->param_flags =
2504 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
2505 } else {
2506 sp->param_flags =
2507 (sp->param_flags & ~SPP_PMTUD) | pmtud_change;
2508 }
2509 }
2510
2511 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
2512 * value of this field is ignored. Note also that a value of zero
2513 * indicates the current setting should be left unchanged.
2514 */
2515 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
2516 if (trans) {
2517 trans->sackdelay =
2518 msecs_to_jiffies(params->spp_sackdelay);
2519 } else if (asoc) {
2520 asoc->sackdelay =
2521 msecs_to_jiffies(params->spp_sackdelay);
2522 } else {
2523 sp->sackdelay = params->spp_sackdelay;
2524 }
2525 }
2526
2527 if (sackdelay_change) {
2528 if (trans) {
2529 trans->param_flags =
2530 (trans->param_flags & ~SPP_SACKDELAY) |
2531 sackdelay_change;
2532 } else if (asoc) {
2533 asoc->param_flags =
2534 (asoc->param_flags & ~SPP_SACKDELAY) |
2535 sackdelay_change;
2536 } else {
2537 sp->param_flags =
2538 (sp->param_flags & ~SPP_SACKDELAY) |
2539 sackdelay_change;
2540 }
2541 }
2542
2543 /* Note that a value of zero indicates the current setting should be
2544 * left unchanged.
2545 */
2546 if (params->spp_pathmaxrxt) {
2547 if (trans) {
2548 trans->pathmaxrxt = params->spp_pathmaxrxt;
2549 } else if (asoc) {
2550 asoc->pathmaxrxt = params->spp_pathmaxrxt;
2551 } else {
2552 sp->pathmaxrxt = params->spp_pathmaxrxt;
2553 }
2554 }
2555
2556 return 0;
2557 }
2558
2559 static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2560 char __user *optval,
2561 unsigned int optlen)
2562 {
2563 struct sctp_paddrparams params;
2564 struct sctp_transport *trans = NULL;
2565 struct sctp_association *asoc = NULL;
2566 struct sctp_sock *sp = sctp_sk(sk);
2567 int error;
2568 int hb_change, pmtud_change, sackdelay_change;
2569
2570 if (optlen != sizeof(struct sctp_paddrparams))
2571 return -EINVAL;
2572
2573 if (copy_from_user(&params, optval, optlen))
2574 return -EFAULT;
2575
2576 /* Validate flags and value parameters. */
2577 hb_change = params.spp_flags & SPP_HB;
2578 pmtud_change = params.spp_flags & SPP_PMTUD;
2579 sackdelay_change = params.spp_flags & SPP_SACKDELAY;
2580
2581 if (hb_change == SPP_HB ||
2582 pmtud_change == SPP_PMTUD ||
2583 sackdelay_change == SPP_SACKDELAY ||
2584 params.spp_sackdelay > 500 ||
2585 (params.spp_pathmtu &&
2586 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2587 return -EINVAL;
2588
2589 /* If an address other than INADDR_ANY is specified, and
2590 * no transport is found, then the request is invalid.
2591 */
2592 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
2593 trans = sctp_addr_id2transport(sk, &params.spp_address,
2594 params.spp_assoc_id);
2595 if (!trans)
2596 return -EINVAL;
2597 }
2598
2599 /* Get association, if assoc_id != 0 and the socket is a one
2600 * to many style socket, and an association was not found, then
2601 * the id was invalid.
2602 */
2603 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
2604 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
2605 return -EINVAL;
2606
2607 /* Heartbeat demand can only be sent on a transport or
2608 * association, but not a socket.
2609 */
2610 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
2611 return -EINVAL;
2612
2613 /* Process parameters. */
2614 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2615 hb_change, pmtud_change,
2616 sackdelay_change);
2617
2618 if (error)
2619 return error;
2620
2621 /* If changes are for association, also apply parameters to each
2622 * transport.
2623 */
2624 if (!trans && asoc) {
2625 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2626 transports) {
2627 sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2628 hb_change, pmtud_change,
2629 sackdelay_change);
2630 }
2631 }
2632
2633 return 0;
2634 }
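
/* Example (user space): a hedged sketch of SCTP_PEER_ADDR_PARAMS, assuming
 * a socket "fd" and an existing association id "assoc_id". Leaving
 * spp_address zeroed (the wildcard) applies the change to the association
 * as a whole, which the code above then copies onto every transport.
 *
 *	struct sctp_paddrparams pp = { 0 };
 *
 *	pp.spp_assoc_id = assoc_id;
 *	pp.spp_flags = SPP_HB_ENABLE;
 *	pp.spp_hbinterval = 5000;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &pp, sizeof(pp)) < 0)
 *		perror("SCTP_PEER_ADDR_PARAMS");
 */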
2635
2636 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
2637 {
2638 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
2639 }
2640
2641 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
2642 {
2643 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
2644 }
2645
2646 /*
2647 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2648 *
2649 * This option will affect the way delayed acks are performed. This
2650 * option allows you to get or set the delayed ack time, in
2651 * milliseconds. It also allows changing the delayed ack frequency.
2652 * Changing the frequency to 1 disables the delayed sack algorithm. If
2653 * the assoc_id is 0, then this sets or gets the endpoint's default
2654 * values. If the assoc_id field is non-zero, then the set or get
2655 * affects the specified association for the one-to-many model (the
2656 * assoc_id field is ignored by the one-to-one model). Note that if
2657 * sack_delay or sack_freq are 0 when setting this option, then the
2658 * current values will remain unchanged.
2659 *
2660 * struct sctp_sack_info {
2661 * sctp_assoc_t sack_assoc_id;
2662 * uint32_t sack_delay;
2663 * uint32_t sack_freq;
2664 * };
2665 *
2666 * sack_assoc_id - This parameter indicates which association the user
2667 * is performing an action upon. Note that if this field's value is
2668 * zero then the endpoint's default value is changed (affecting future
2669 * associations only).
2670 *
2671 * sack_delay - This parameter contains the number of milliseconds that
2672 * the user is requesting the delayed ACK timer be set to. Note that
2673 * this value is defined in the standard to be between 200 and 500
2674 * milliseconds.
2675 *
2676 * sack_freq - This parameter contains the number of packets that must
2677 * be received before a sack is sent without waiting for the delay
2678 * timer to expire. The default value for this is 2, setting this
2679 * value to 1 will disable the delayed sack algorithm.
2680 */
2681
2682 static int sctp_setsockopt_delayed_ack(struct sock *sk,
2683 char __user *optval, unsigned int optlen)
2684 {
2685 struct sctp_sack_info params;
2686 struct sctp_transport *trans = NULL;
2687 struct sctp_association *asoc = NULL;
2688 struct sctp_sock *sp = sctp_sk(sk);
2689
2690 if (optlen == sizeof(struct sctp_sack_info)) {
2691 if (copy_from_user(&params, optval, optlen))
2692 return -EFAULT;
2693
2694 if (params.sack_delay == 0 && params.sack_freq == 0)
2695 return 0;
2696 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2697 pr_warn_ratelimited(DEPRECATED
2698 "%s (pid %d) "
2699 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
2700 "Use struct sctp_sack_info instead\n",
2701 current->comm, task_pid_nr(current));
2702 if (copy_from_user(&params, optval, optlen))
2703 return -EFAULT;
2704
2705 if (params.sack_delay == 0)
2706 params.sack_freq = 1;
2707 else
2708 params.sack_freq = 0;
2709 } else
2710 return -EINVAL;
2711
2712 /* Validate value parameter. */
2713 if (params.sack_delay > 500)
2714 return -EINVAL;
2715
2716 /* Get association, if sack_assoc_id != 0 and the socket is a one
2717 * to many style socket, and an association was not found, then
2718 * the id was invalid.
2719 */
2720 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
2721 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
2722 return -EINVAL;
2723
2724 if (params.sack_delay) {
2725 if (asoc) {
2726 asoc->sackdelay =
2727 msecs_to_jiffies(params.sack_delay);
2728 asoc->param_flags =
2729 sctp_spp_sackdelay_enable(asoc->param_flags);
2730 } else {
2731 sp->sackdelay = params.sack_delay;
2732 sp->param_flags =
2733 sctp_spp_sackdelay_enable(sp->param_flags);
2734 }
2735 }
2736
2737 if (params.sack_freq == 1) {
2738 if (asoc) {
2739 asoc->param_flags =
2740 sctp_spp_sackdelay_disable(asoc->param_flags);
2741 } else {
2742 sp->param_flags =
2743 sctp_spp_sackdelay_disable(sp->param_flags);
2744 }
2745 } else if (params.sack_freq > 1) {
2746 if (asoc) {
2747 asoc->sackfreq = params.sack_freq;
2748 asoc->param_flags =
2749 sctp_spp_sackdelay_enable(asoc->param_flags);
2750 } else {
2751 sp->sackfreq = params.sack_freq;
2752 sp->param_flags =
2753 sctp_spp_sackdelay_enable(sp->param_flags);
2754 }
2755 }
2756
2757 /* If change is for association, also apply to each transport. */
2758 if (asoc) {
2759 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2760 transports) {
2761 if (params.sack_delay) {
2762 trans->sackdelay =
2763 msecs_to_jiffies(params.sack_delay);
2764 trans->param_flags =
2765 sctp_spp_sackdelay_enable(trans->param_flags);
2766 }
2767 if (params.sack_freq == 1) {
2768 trans->param_flags =
2769 sctp_spp_sackdelay_disable(trans->param_flags);
2770 } else if (params.sack_freq > 1) {
2771 trans->sackfreq = params.sack_freq;
2772 trans->param_flags =
2773 sctp_spp_sackdelay_enable(trans->param_flags);
2774 }
2775 }
2776 }
2777
2778 return 0;
2779 }
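
/* Example (user space): a minimal sketch of SCTP_DELAYED_SACK using the
 * current struct sctp_sack_info form (the struct sctp_assoc_value form is
 * deprecated, as warned above), assuming a socket "fd" and association id
 * "assoc_id". A 200 ms delay is requested here; a sack_freq of 1 would
 * disable delayed SACKs entirely.
 *
 *	struct sctp_sack_info si = { 0 };
 *
 *	si.sack_assoc_id = assoc_id;
 *	si.sack_delay = 200;
 *	si.sack_freq = 2;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *		       &si, sizeof(si)) < 0)
 *		perror("SCTP_DELAYED_SACK");
 */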
2780
2781 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
2782 *
2783 * Applications can specify protocol parameters for the default association
2784 * initialization. The option name argument to setsockopt() and getsockopt()
2785 * is SCTP_INITMSG.
2786 *
2787 * Setting initialization parameters is effective only on an unconnected
2788 * socket (for UDP-style sockets, only future associations are affected
2789 * by the change). With TCP-style sockets, this option is inherited by
2790 * sockets derived from a listener socket.
2791 */
2792 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
2793 {
2794 struct sctp_initmsg sinit;
2795 struct sctp_sock *sp = sctp_sk(sk);
2796
2797 if (optlen != sizeof(struct sctp_initmsg))
2798 return -EINVAL;
2799 if (copy_from_user(&sinit, optval, optlen))
2800 return -EFAULT;
2801
2802 if (sinit.sinit_num_ostreams)
2803 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
2804 if (sinit.sinit_max_instreams)
2805 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
2806 if (sinit.sinit_max_attempts)
2807 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
2808 if (sinit.sinit_max_init_timeo)
2809 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
2810
2811 return 0;
2812 }
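
/* Example (user space): a minimal sketch of setting the endpoint INIT
 * defaults, assuming a socket "fd". The values seed sp->initmsg above and
 * apply only to associations created after the call; zero fields leave
 * the current setting unchanged.
 *
 *	struct sctp_initmsg im = { 0 };
 *
 *	im.sinit_num_ostreams = 8;
 *	im.sinit_max_instreams = 8;
 *	im.sinit_max_attempts = 4;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im)) < 0)
 *		perror("SCTP_INITMSG");
 */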
2813
2814 /*
2815 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
2816 *
2817 * Applications that wish to use the sendto() system call may wish to
2818 * specify a default set of parameters that would normally be supplied
2819 * through the inclusion of ancillary data. This socket option allows
2820 * such an application to set the default sctp_sndrcvinfo structure.
2821 * The application that wishes to use this socket option simply passes
2822 * in to this call the sctp_sndrcvinfo structure defined in Section
2823 * 5.2.2) The input parameters accepted by this call include
2824 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
2825 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
2826 * to this call if the caller is using the UDP model.
2827 */
2828 static int sctp_setsockopt_default_send_param(struct sock *sk,
2829 char __user *optval,
2830 unsigned int optlen)
2831 {
2832 struct sctp_sock *sp = sctp_sk(sk);
2833 struct sctp_association *asoc;
2834 struct sctp_sndrcvinfo info;
2835
2836 if (optlen != sizeof(info))
2837 return -EINVAL;
2838 if (copy_from_user(&info, optval, optlen))
2839 return -EFAULT;
2840 if (info.sinfo_flags &
2841 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2842 SCTP_ABORT | SCTP_EOF))
2843 return -EINVAL;
2844
2845 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
2846 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
2847 return -EINVAL;
2848 if (asoc) {
2849 asoc->default_stream = info.sinfo_stream;
2850 asoc->default_flags = info.sinfo_flags;
2851 asoc->default_ppid = info.sinfo_ppid;
2852 asoc->default_context = info.sinfo_context;
2853 asoc->default_timetolive = info.sinfo_timetolive;
2854 } else {
2855 sp->default_stream = info.sinfo_stream;
2856 sp->default_flags = info.sinfo_flags;
2857 sp->default_ppid = info.sinfo_ppid;
2858 sp->default_context = info.sinfo_context;
2859 sp->default_timetolive = info.sinfo_timetolive;
2860 }
2861
2862 return 0;
2863 }
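
/* Example (user space): a hedged sketch of SCTP_DEFAULT_SEND_PARAM,
 * assuming a socket "fd" and association id "assoc_id". The values become
 * the defaults that sctp_sendmsg() above falls back on when no SCTP_SNDRCV
 * ancillary data accompanies a message.
 *
 *	struct sctp_sndrcvinfo def = { 0 };
 *
 *	def.sinfo_assoc_id = assoc_id;
 *	def.sinfo_stream = 2;
 *	def.sinfo_flags = SCTP_UNORDERED;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &def, sizeof(def)) < 0)
 *		perror("SCTP_DEFAULT_SEND_PARAM");
 */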
2864
2865 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
2866 * (SCTP_DEFAULT_SNDINFO)
2867 */
2868 static int sctp_setsockopt_default_sndinfo(struct sock *sk,
2869 char __user *optval,
2870 unsigned int optlen)
2871 {
2872 struct sctp_sock *sp = sctp_sk(sk);
2873 struct sctp_association *asoc;
2874 struct sctp_sndinfo info;
2875
2876 if (optlen != sizeof(info))
2877 return -EINVAL;
2878 if (copy_from_user(&info, optval, optlen))
2879 return -EFAULT;
2880 if (info.snd_flags &
2881 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2882 SCTP_ABORT | SCTP_EOF))
2883 return -EINVAL;
2884
2885 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
2886 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
2887 return -EINVAL;
2888 if (asoc) {
2889 asoc->default_stream = info.snd_sid;
2890 asoc->default_flags = info.snd_flags;
2891 asoc->default_ppid = info.snd_ppid;
2892 asoc->default_context = info.snd_context;
2893 } else {
2894 sp->default_stream = info.snd_sid;
2895 sp->default_flags = info.snd_flags;
2896 sp->default_ppid = info.snd_ppid;
2897 sp->default_context = info.snd_context;
2898 }
2899
2900 return 0;
2901 }
2902
2903 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
2904 *
2905 * Requests that the local SCTP stack use the enclosed peer address as
2906 * the association primary. The enclosed address must be one of the
2907 * association peer's addresses.
2908 */
2909 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2910 unsigned int optlen)
2911 {
2912 struct sctp_prim prim;
2913 struct sctp_transport *trans;
2914
2915 if (optlen != sizeof(struct sctp_prim))
2916 return -EINVAL;
2917
2918 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
2919 return -EFAULT;
2920
2921 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
2922 if (!trans)
2923 return -EINVAL;
2924
2925 sctp_assoc_set_primary(trans->asoc, trans);
2926
2927 return 0;
2928 }
2929
2930 /*
2931 * 7.1.5 SCTP_NODELAY
2932 *
2933 * Turn on/off any Nagle-like algorithm. This means that packets are
2934 * generally sent as soon as possible and no unnecessary delays are
2935 * introduced, at the cost of more packets in the network. Expects an
2936 * integer boolean flag.
2937 */
2938 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2939 unsigned int optlen)
2940 {
2941 int val;
2942
2943 if (optlen < sizeof(int))
2944 return -EINVAL;
2945 if (get_user(val, (int __user *)optval))
2946 return -EFAULT;
2947
2948 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
2949 return 0;
2950 }
2951
2952 /*
2953 *
2954 * 7.1.1 SCTP_RTOINFO
2955 *
2956 * The protocol parameters used to initialize and bound retransmission
2957 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
2958 * and modify these parameters.
2959 * All parameters are time values, in milliseconds. A value of 0, when
2960 * modifying the parameters, indicates that the current value should not
2961 * be changed.
2962 *
2963 */
2964 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
2965 {
2966 struct sctp_rtoinfo rtoinfo;
2967 struct sctp_association *asoc;
2968 unsigned long rto_min, rto_max;
2969 struct sctp_sock *sp = sctp_sk(sk);
2970
2971 if (optlen != sizeof (struct sctp_rtoinfo))
2972 return -EINVAL;
2973
2974 if (copy_from_user(&rtoinfo, optval, optlen))
2975 return -EFAULT;
2976
2977 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
2978
2979 /* Set the values to the specific association */
2980 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
2981 return -EINVAL;
2982
2983 rto_max = rtoinfo.srto_max;
2984 rto_min = rtoinfo.srto_min;
2985
2986 if (rto_max)
2987 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
2988 else
2989 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
2990
2991 if (rto_min)
2992 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
2993 else
2994 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
2995
2996 if (rto_min > rto_max)
2997 return -EINVAL;
2998
2999 if (asoc) {
3000 if (rtoinfo.srto_initial != 0)
3001 asoc->rto_initial =
3002 msecs_to_jiffies(rtoinfo.srto_initial);
3003 asoc->rto_max = rto_max;
3004 asoc->rto_min = rto_min;
3005 } else {
3006 /* If there is no association or the association-id = 0
3007 * set the values to the endpoint.
3008 */
3009 if (rtoinfo.srto_initial != 0)
3010 sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
3011 sp->rtoinfo.srto_max = rto_max;
3012 sp->rtoinfo.srto_min = rto_min;
3013 }
3014
3015 return 0;
3016 }
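
/* Example (user space): a minimal sketch of SCTP_RTOINFO, assuming a
 * socket "fd" and association id "assoc_id". All values are in
 * milliseconds and a zero field keeps the current setting, mirroring the
 * checks above.
 *
 *	struct sctp_rtoinfo ri = { 0 };
 *
 *	ri.srto_assoc_id = assoc_id;
 *	ri.srto_initial = 3000;
 *	ri.srto_min = 1000;
 *	ri.srto_max = 60000;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &ri, sizeof(ri)) < 0)
 *		perror("SCTP_RTOINFO");
 */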
3017
3018 /*
3019 *
3020 * 7.1.2 SCTP_ASSOCINFO
3021 *
3022 * This option is used to tune the maximum retransmission attempts
3023 * of the association.
3024 * Returns an error if the new association retransmission value is
3025 * greater than the sum of the retransmission value of the peer.
3026 * See [SCTP] for more information.
3027 *
3028 */
3029 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
3030 {
3031
3032 struct sctp_assocparams assocparams;
3033 struct sctp_association *asoc;
3034
3035 if (optlen != sizeof(struct sctp_assocparams))
3036 return -EINVAL;
3037 if (copy_from_user(&assocparams, optval, optlen))
3038 return -EFAULT;
3039
3040 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
3041
3042 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
3043 return -EINVAL;
3044
3045 /* Set the values to the specific association */
3046 if (asoc) {
3047 if (assocparams.sasoc_asocmaxrxt != 0) {
3048 __u32 path_sum = 0;
3049 int paths = 0;
3050 struct sctp_transport *peer_addr;
3051
3052 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
3053 transports) {
3054 path_sum += peer_addr->pathmaxrxt;
3055 paths++;
3056 }
3057
3058 /* Only validate asocmaxrxt if we have more than
3059 * one path/transport. We do this because path
3060 * retransmissions are only counted when we have more
3061 * than one path.
3062 */
3063 if (paths > 1 &&
3064 assocparams.sasoc_asocmaxrxt > path_sum)
3065 return -EINVAL;
3066
3067 asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
3068 }
3069
3070 if (assocparams.sasoc_cookie_life != 0)
3071 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
3072 } else {
3073 /* Set the values to the endpoint */
3074 struct sctp_sock *sp = sctp_sk(sk);
3075
3076 if (assocparams.sasoc_asocmaxrxt != 0)
3077 sp->assocparams.sasoc_asocmaxrxt =
3078 assocparams.sasoc_asocmaxrxt;
3079 if (assocparams.sasoc_cookie_life != 0)
3080 sp->assocparams.sasoc_cookie_life =
3081 assocparams.sasoc_cookie_life;
3082 }
3083 return 0;
3084 }
3085
3086 /*
3087 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
3088 *
3089 * This socket option is a boolean flag which turns on or off mapped V4
3090 * addresses. If this option is turned on and the socket is type
3091 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
3092 * If this option is turned off, then no mapping will be done of V4
3093 * addresses and a user will receive both PF_INET6 and PF_INET type
3094 * addresses on the socket.
3095 */
3096 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
3097 {
3098 int val;
3099 struct sctp_sock *sp = sctp_sk(sk);
3100
3101 if (optlen < sizeof(int))
3102 return -EINVAL;
3103 if (get_user(val, (int __user *)optval))
3104 return -EFAULT;
3105 if (val)
3106 sp->v4mapped = 1;
3107 else
3108 sp->v4mapped = 0;
3109
3110 return 0;
3111 }
3112
3113 /*
3114 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
3115 * This option will get or set the maximum size to put in any outgoing
3116 * SCTP DATA chunk. If a message is larger than this size it will be
3117 * fragmented by SCTP into the specified size. Note that the underlying
3118 * SCTP implementation may fragment into smaller sized chunks when the
3119 * PMTU of the underlying association is smaller than the value set by
3120 * the user. The default value for this option is '0' which indicates
3121 * the user is NOT limiting fragmentation and only the PMTU will effect
3122 * SCTP's choice of DATA chunk size. Note also that values set larger
3123 * than the maximum size of an IP datagram will effectively let SCTP
3124 * control fragmentation (i.e. the same as setting this option to 0).
3125 *
3126 * The following structure is used to access and modify this parameter:
3127 *
3128 * struct sctp_assoc_value {
3129 * sctp_assoc_t assoc_id;
3130 * uint32_t assoc_value;
3131 * };
3132 *
3133 * assoc_id: This parameter is ignored for one-to-one style sockets.
3134 * For one-to-many style sockets this parameter indicates which
3135 * association the user is performing an action upon. Note that if
3136 * this field's value is zero then the endpoint's default value is
3137 * changed (affecting future associations only).
3138 * assoc_value: This parameter specifies the maximum size in bytes.
3139 */
3140 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
3141 {
3142 struct sctp_sock *sp = sctp_sk(sk);
3143 struct sctp_assoc_value params;
3144 struct sctp_association *asoc;
3145 int val;
3146
3147 if (optlen == sizeof(int)) {
3148 pr_warn_ratelimited(DEPRECATED
3149 "%s (pid %d) "
3150 "Use of int in maxseg socket option.\n"
3151 "Use struct sctp_assoc_value instead\n",
3152 current->comm, task_pid_nr(current));
3153 if (copy_from_user(&val, optval, optlen))
3154 return -EFAULT;
3155 params.assoc_id = 0;
3156 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3157 if (copy_from_user(&params, optval, optlen))
3158 return -EFAULT;
3159 val = params.assoc_value;
3160 } else {
3161 return -EINVAL;
3162 }
3163
3164 if (val) {
3165 int min_len, max_len;
3166
3167 min_len = SCTP_DEFAULT_MINSEGMENT - sp->pf->af->net_header_len;
3168 min_len -= sizeof(struct sctphdr) +
3169 sizeof(struct sctp_data_chunk);
3170
3171 max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk);
3172
3173 if (val < min_len || val > max_len)
3174 return -EINVAL;
3175 }
3176
3177 asoc = sctp_id2assoc(sk, params.assoc_id);
3178 if (asoc) {
3179 if (val == 0) {
3180 val = asoc->pathmtu - sp->pf->af->net_header_len;
3181 val -= sizeof(struct sctphdr) +
3182 sizeof(struct sctp_data_chunk);
3183 }
3184 asoc->user_frag = val;
3185 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
3186 } else {
3187 if (params.assoc_id && sctp_style(sk, UDP))
3188 return -EINVAL;
3189 sp->user_frag = val;
3190 }
3191
3192 return 0;
3193 }
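
/* Example (user space): a hedged sketch of SCTP_MAXSEG using the preferred
 * struct sctp_assoc_value form (the bare int form is deprecated, as warned
 * above), assuming a socket "fd". A value of 0 lets the PMTU alone govern
 * the DATA chunk size.
 *
 *	struct sctp_assoc_value av = { 0 };
 *
 *	av.assoc_id = 0;
 *	av.assoc_value = 1200;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av)) < 0)
 *		perror("SCTP_MAXSEG");
 */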
3194
3195
3196 /*
3197 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
3198 *
3199 * Requests that the peer mark the enclosed address as the association
3200 * primary. The enclosed address must be one of the association's
3201 * locally bound addresses. The following structure is used to make a
3202 * set primary request:
3203 */
3204 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
3205 unsigned int optlen)
3206 {
3207 struct net *net = sock_net(sk);
3208 struct sctp_sock *sp;
3209 struct sctp_association *asoc = NULL;
3210 struct sctp_setpeerprim prim;
3211 struct sctp_chunk *chunk;
3212 struct sctp_af *af;
3213 int err;
3214
3215 sp = sctp_sk(sk);
3216
3217 if (!net->sctp.addip_enable)
3218 return -EPERM;
3219
3220 if (optlen != sizeof(struct sctp_setpeerprim))
3221 return -EINVAL;
3222
3223 if (copy_from_user(&prim, optval, optlen))
3224 return -EFAULT;
3225
3226 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
3227 if (!asoc)
3228 return -EINVAL;
3229
3230 if (!asoc->peer.asconf_capable)
3231 return -EPERM;
3232
3233 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
3234 return -EPERM;
3235
3236 if (!sctp_state(asoc, ESTABLISHED))
3237 return -ENOTCONN;
3238
3239 af = sctp_get_af_specific(prim.sspp_addr.ss_family);
3240 if (!af)
3241 return -EINVAL;
3242
3243 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
3244 return -EADDRNOTAVAIL;
3245
3246 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
3247 return -EADDRNOTAVAIL;
3248
3249 /* Create an ASCONF chunk with SET_PRIMARY parameter */
3250 chunk = sctp_make_asconf_set_prim(asoc,
3251 (union sctp_addr *)&prim.sspp_addr);
3252 if (!chunk)
3253 return -ENOMEM;
3254
3255 err = sctp_send_asconf(asoc, chunk);
3256
3257 pr_debug("%s: we set peer primary addr primitively\n", __func__);
3258
3259 return err;
3260 }
3261
3262 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
3263 unsigned int optlen)
3264 {
3265 struct sctp_setadaptation adaptation;
3266
3267 if (optlen != sizeof(struct sctp_setadaptation))
3268 return -EINVAL;
3269 if (copy_from_user(&adaptation, optval, optlen))
3270 return -EFAULT;
3271
3272 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
3273
3274 return 0;
3275 }
3276
3277 /*
3278 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
3279 *
3280 * The context field in the sctp_sndrcvinfo structure is normally only
3281 * used when a failed message is retrieved holding the value that was
3282 * sent down on the actual send call. This option allows the setting of
3283 * a default context on an association basis that will be received on
3284 * reading messages from the peer. This is especially helpful in the
3285 * one-to-many model for an application to keep some reference to an
3286 * internal state machine that is processing messages on the
3287 * association. Note that the setting of this value only affects
3288 * received messages from the peer and does not affect the value that is
3289 * saved with outbound messages.
3290 */
3291 static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3292 unsigned int optlen)
3293 {
3294 struct sctp_assoc_value params;
3295 struct sctp_sock *sp;
3296 struct sctp_association *asoc;
3297
3298 if (optlen != sizeof(struct sctp_assoc_value))
3299 return -EINVAL;
3300 if (copy_from_user(&params, optval, optlen))
3301 return -EFAULT;
3302
3303 sp = sctp_sk(sk);
3304
3305 if (params.assoc_id != 0) {
3306 asoc = sctp_id2assoc(sk, params.assoc_id);
3307 if (!asoc)
3308 return -EINVAL;
3309 asoc->default_rcv_context = params.assoc_value;
3310 } else {
3311 sp->default_rcv_context = params.assoc_value;
3312 }
3313
3314 return 0;
3315 }
3316
3317 /*
3318 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
3319 *
3320 * This option will at a minimum specify if the implementation is doing
3321 * fragmented interleave. Fragmented interleave, for a one to many
3322 * socket, is when subsequent calls to receive a message may return
3323 * parts of messages from different associations. Some implementations
3324 * may allow you to turn this value on or off. If so, when turned off,
3325 * no fragment interleave will occur (which will cause head-of-line
3326 * blocking amongst multiple associations sharing the same one-to-many
3327 * socket). When this option is turned on, then each receive call may
3328 * come from a different association (thus the user must receive data
3329 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
3330 * association each receive belongs to).
3331 *
3332 * This option takes a boolean value. A non-zero value indicates that
3333 * fragmented interleave is on. A value of zero indicates that
3334 * fragmented interleave is off.
3335 *
3336 * Note that it is important that an implementation that allows this
3337 * option to be turned on, have it off by default. Otherwise an unaware
3338 * application using the one to many model may become confused and act
3339 * incorrectly.
3340 */
3341 static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3342 char __user *optval,
3343 unsigned int optlen)
3344 {
3345 int val;
3346
3347 if (optlen != sizeof(int))
3348 return -EINVAL;
3349 if (get_user(val, (int __user *)optval))
3350 return -EFAULT;
3351
3352 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
3353
3354 return 0;
3355 }
3356
3357 /*
3358 * 8.1.21. Set or Get the SCTP Partial Delivery Point
3359 * (SCTP_PARTIAL_DELIVERY_POINT)
3360 *
3361 * This option will set or get the SCTP partial delivery point. This
3362 * point is the size of a message where the partial delivery API will be
3363 * invoked to help free up rwnd space for the peer. Setting this to a
3364 * lower value will cause partial deliveries to happen more often. The
3365 * call's argument is an integer that sets or gets the partial delivery
3366 * point. Note also that the call will fail if the user attempts to set
3367 * this value larger than the socket receive buffer size.
3368 *
3369 * Note that any single message having a length smaller than or equal to
3370 * the SCTP partial delivery point will be delivered in one single read
3371 * call as long as the user provided buffer is large enough to hold the
3372 * message.
3373 */
3374 static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3375 char __user *optval,
3376 unsigned int optlen)
3377 {
3378 u32 val;
3379
3380 if (optlen != sizeof(u32))
3381 return -EINVAL;
3382 if (get_user(val, (int __user *)optval))
3383 return -EFAULT;
3384
3385 /* Note: We double the receive buffer from what the user sets
3386 * it to be, also initial rwnd is based on rcvbuf/2.
3387 */
3388 if (val > (sk->sk_rcvbuf >> 1))
3389 return -EINVAL;
3390
3391 sctp_sk(sk)->pd_point = val;
3392
3393 return 0; /* is this the right error code? */
3394 }
3395
3396 /*
3397 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
3398 *
3399 * This option will allow a user to change the maximum burst of packets
3400 * that can be emitted by this association. Note that the default value
3401 * is 4, and some implementations may restrict this setting so that it
3402 * can only be lowered.
3403 *
3404 * NOTE: This text doesn't seem right. Do this on a socket basis with
3405 * future associations inheriting the socket value.
3406 */
3407 static int sctp_setsockopt_maxburst(struct sock *sk,
3408 char __user *optval,
3409 unsigned int optlen)
3410 {
3411 struct sctp_assoc_value params;
3412 struct sctp_sock *sp;
3413 struct sctp_association *asoc;
3414 int val;
3415 int assoc_id = 0;
3416
3417 if (optlen == sizeof(int)) {
3418 pr_warn_ratelimited(DEPRECATED
3419 "%s (pid %d) "
3420 "Use of int in max_burst socket option deprecated.\n"
3421 "Use struct sctp_assoc_value instead\n",
3422 current->comm, task_pid_nr(current));
3423 if (copy_from_user(&val, optval, optlen))
3424 return -EFAULT;
3425 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3426 if (copy_from_user(&params, optval, optlen))
3427 return -EFAULT;
3428 val = params.assoc_value;
3429 assoc_id = params.assoc_id;
3430 } else
3431 return -EINVAL;
3432
3433 sp = sctp_sk(sk);
3434
3435 if (assoc_id != 0) {
3436 asoc = sctp_id2assoc(sk, assoc_id);
3437 if (!asoc)
3438 return -EINVAL;
3439 asoc->max_burst = val;
3440 } else
3441 sp->max_burst = val;
3442
3443 return 0;
3444 }
3445
3446 /*
3447 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
3448 *
3449 * This set option adds a chunk type that the user is requesting to be
3450 * received only in an authenticated way. Changes to the list of chunks
3451 * will only affect future associations on the socket.
3452 */
3453 static int sctp_setsockopt_auth_chunk(struct sock *sk,
3454 char __user *optval,
3455 unsigned int optlen)
3456 {
3457 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3458 struct sctp_authchunk val;
3459
3460 if (!ep->auth_enable)
3461 return -EACCES;
3462
3463 if (optlen != sizeof(struct sctp_authchunk))
3464 return -EINVAL;
3465 if (copy_from_user(&val, optval, optlen))
3466 return -EFAULT;
3467
3468 switch (val.sauth_chunk) {
3469 case SCTP_CID_INIT:
3470 case SCTP_CID_INIT_ACK:
3471 case SCTP_CID_SHUTDOWN_COMPLETE:
3472 case SCTP_CID_AUTH:
3473 return -EINVAL;
3474 }
3475
3476 /* add this chunk id to the endpoint */
3477 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
3478 }
3479
3480 /*
3481 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
3482 *
3483 * This option gets or sets the list of HMAC algorithms that the local
3484 * endpoint requires the peer to use.
3485 */
3486 static int sctp_setsockopt_hmac_ident(struct sock *sk,
3487 char __user *optval,
3488 unsigned int optlen)
3489 {
3490 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3491 struct sctp_hmacalgo *hmacs;
3492 u32 idents;
3493 int err;
3494
3495 if (!ep->auth_enable)
3496 return -EACCES;
3497
3498 if (optlen < sizeof(struct sctp_hmacalgo))
3499 return -EINVAL;
3500 optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
3501 SCTP_AUTH_NUM_HMACS * sizeof(u16));
3502
3503 hmacs = memdup_user(optval, optlen);
3504 if (IS_ERR(hmacs))
3505 return PTR_ERR(hmacs);
3506
3507 idents = hmacs->shmac_num_idents;
3508 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
3509 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
3510 err = -EINVAL;
3511 goto out;
3512 }
3513
3514 err = sctp_auth_ep_set_hmacs(ep, hmacs);
3515 out:
3516 kfree(hmacs);
3517 return err;
3518 }
3519
3520 /*
3521 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3522 *
3523 * This option will set a shared secret key which is used to build an
3524 * association shared key.
3525 */
3526 static int sctp_setsockopt_auth_key(struct sock *sk,
3527 char __user *optval,
3528 unsigned int optlen)
3529 {
3530 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3531 struct sctp_authkey *authkey;
3532 struct sctp_association *asoc;
3533 int ret;
3534
3535 if (!ep->auth_enable)
3536 return -EACCES;
3537
3538 if (optlen <= sizeof(struct sctp_authkey))
3539 return -EINVAL;
3540 /* authkey->sca_keylength is u16, so optlen can't be bigger than
3541 * this.
3542 */
3543 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3544 sizeof(struct sctp_authkey));
3545
3546 authkey = memdup_user(optval, optlen);
3547 if (IS_ERR(authkey))
3548 return PTR_ERR(authkey);
3549
3550 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
3551 ret = -EINVAL;
3552 goto out;
3553 }
3554
3555 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
3556 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
3557 ret = -EINVAL;
3558 goto out;
3559 }
3560
3561 ret = sctp_auth_set_key(ep, asoc, authkey);
3562 out:
3563 kzfree(authkey);
3564 return ret;
3565 }
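
/* Example (user space): a sketch of installing a 16-byte shared key as key
 * number 1 for all future associations, assuming an SCTP socket descriptor
 * 'sd', a byte array 'secret[16]', and the <netinet/sctp.h> declarations.
 * The key bytes trail the fixed-size header, as the length checks above
 * expect:
 *
 *	size_t len = sizeof(struct sctp_authkey) + 16;
 *	struct sctp_authkey *key = calloc(1, len);
 *
 *	if (key) {
 *		key->sca_assoc_id = 0;		// endpoint-wide default
 *		key->sca_keynumber = 1;
 *		key->sca_keylength = 16;
 *		memcpy(key->sca_key, secret, 16);
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, key, len) < 0)
 *			perror("SCTP_AUTH_KEY");
 *		free(key);
 *	}
 */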
3566
3567 /*
3568 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3569 *
3570 * This option will get or set the active shared key to be used to build
3571 * the association shared key.
3572 */
3573 static int sctp_setsockopt_active_key(struct sock *sk,
3574 char __user *optval,
3575 unsigned int optlen)
3576 {
3577 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3578 struct sctp_authkeyid val;
3579 struct sctp_association *asoc;
3580
3581 if (!ep->auth_enable)
3582 return -EACCES;
3583
3584 if (optlen != sizeof(struct sctp_authkeyid))
3585 return -EINVAL;
3586 if (copy_from_user(&val, optval, optlen))
3587 return -EFAULT;
3588
3589 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3590 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3591 return -EINVAL;
3592
3593 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
3594 }
3595
3596 /*
3597 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3598 *
3599 * This set option will delete a shared secret key from use.
3600 */
3601 static int sctp_setsockopt_del_key(struct sock *sk,
3602 char __user *optval,
3603 unsigned int optlen)
3604 {
3605 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3606 struct sctp_authkeyid val;
3607 struct sctp_association *asoc;
3608
3609 if (!ep->auth_enable)
3610 return -EACCES;
3611
3612 if (optlen != sizeof(struct sctp_authkeyid))
3613 return -EINVAL;
3614 if (copy_from_user(&val, optval, optlen))
3615 return -EFAULT;
3616
3617 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3618 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3619 return -EINVAL;
3620
3621 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
3622
3623 }
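
/* Example (user space): a sketch that makes key number 1 the active key and
 * then deletes key number 0, assuming 'sd' is an SCTP socket, key 1 was
 * installed via SCTP_AUTH_KEY, and no association still uses key 0:
 *
 *	struct sctp_authkeyid keyid = {
 *		.scact_assoc_id = 0,	// endpoint default
 *		.scact_keynumber = 1,
 *	};
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *		       &keyid, sizeof(keyid)) < 0)
 *		perror("SCTP_AUTH_ACTIVE_KEY");
 *
 *	keyid.scact_keynumber = 0;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_DELETE_KEY,
 *		       &keyid, sizeof(keyid)) < 0)
 *		perror("SCTP_AUTH_DELETE_KEY");
 */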
3624
3625 /*
3626 * 8.1.23 SCTP_AUTO_ASCONF
3627 *
3628 * This option will enable or disable the use of the automatic generation of
3629 * ASCONF chunks to add and delete addresses to an existing association. Note
3630 * that this option has two caveats namely: a) it only affects sockets that
3631 * are bound to all addresses available to the SCTP stack, and b) the system
3632 * administrator may have an overriding control that turns the ASCONF feature
3633 * off no matter what setting the socket option may have.
3634 * This option expects an integer boolean flag, where a non-zero value turns on
3635 * the option, and a zero value turns off the option.
3636  * Note: in this implementation the socket option overrides the default set by
3637  * the sysctl, as the FreeBSD implementation does.
3638 */
3639 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3640 unsigned int optlen)
3641 {
3642 int val;
3643 struct sctp_sock *sp = sctp_sk(sk);
3644
3645 if (optlen < sizeof(int))
3646 return -EINVAL;
3647 if (get_user(val, (int __user *)optval))
3648 return -EFAULT;
3649 if (!sctp_is_ep_boundall(sk) && val)
3650 return -EINVAL;
3651 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
3652 return 0;
3653
3654 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3655 if (val == 0 && sp->do_auto_asconf) {
3656 list_del(&sp->auto_asconf_list);
3657 sp->do_auto_asconf = 0;
3658 } else if (val && !sp->do_auto_asconf) {
3659 list_add_tail(&sp->auto_asconf_list,
3660 &sock_net(sk)->sctp.auto_asconf_splist);
3661 sp->do_auto_asconf = 1;
3662 }
3663 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3664 return 0;
3665 }
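
/* Example (user space): a sketch of turning automatic ASCONF on, assuming
 * 'sd' is an SCTP socket already bound to the wildcard address (the code
 * above rejects the request otherwise) and that the system-wide control has
 * not disabled the feature:
 *
 *	int on = 1;
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on)) < 0)
 *		perror("SCTP_AUTO_ASCONF");
 */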
3666
3667 /*
3668 * SCTP_PEER_ADDR_THLDS
3669 *
3670  * This option allows us to alter the potentially failed (PF) threshold for one or all
3671 * transports in an association. See Section 6.1 of:
3672 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
3673 */
3674 static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
3675 char __user *optval,
3676 unsigned int optlen)
3677 {
3678 struct sctp_paddrthlds val;
3679 struct sctp_transport *trans;
3680 struct sctp_association *asoc;
3681
3682 if (optlen < sizeof(struct sctp_paddrthlds))
3683 return -EINVAL;
3684 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
3685 sizeof(struct sctp_paddrthlds)))
3686 return -EFAULT;
3687
3688
3689 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
3690 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
3691 if (!asoc)
3692 return -ENOENT;
3693 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
3694 transports) {
3695 if (val.spt_pathmaxrxt)
3696 trans->pathmaxrxt = val.spt_pathmaxrxt;
3697 trans->pf_retrans = val.spt_pathpfthld;
3698 }
3699
3700 if (val.spt_pathmaxrxt)
3701 asoc->pathmaxrxt = val.spt_pathmaxrxt;
3702 asoc->pf_retrans = val.spt_pathpfthld;
3703 } else {
3704 trans = sctp_addr_id2transport(sk, &val.spt_address,
3705 val.spt_assoc_id);
3706 if (!trans)
3707 return -ENOENT;
3708
3709 if (val.spt_pathmaxrxt)
3710 trans->pathmaxrxt = val.spt_pathmaxrxt;
3711 trans->pf_retrans = val.spt_pathpfthld;
3712 }
3713
3714 return 0;
3715 }
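
/* Example (user space): a sketch of lowering the potentially-failed threshold
 * on every peer address of one association, assuming 'sd' is an SCTP socket
 * and 'assoc_id' identifies an existing association.  Leaving spt_address
 * zeroed selects the wildcard branch above, so the values apply to all
 * transports:
 *
 *	struct sctp_paddrthlds thlds;
 *
 *	memset(&thlds, 0, sizeof(thlds));
 *	thlds.spt_assoc_id = assoc_id;
 *	thlds.spt_pathpfthld = 2;	// potentially failed after 2 timeouts
 *	thlds.spt_pathmaxrxt = 5;	// 0 would leave Path.Max.Retrans as is
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &thlds, sizeof(thlds)) < 0)
 *		perror("SCTP_PEER_ADDR_THLDS");
 */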
3716
3717 static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
3718 char __user *optval,
3719 unsigned int optlen)
3720 {
3721 int val;
3722
3723 if (optlen < sizeof(int))
3724 return -EINVAL;
3725 if (get_user(val, (int __user *) optval))
3726 return -EFAULT;
3727
3728 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
3729
3730 return 0;
3731 }
3732
3733 static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
3734 char __user *optval,
3735 unsigned int optlen)
3736 {
3737 int val;
3738
3739 if (optlen < sizeof(int))
3740 return -EINVAL;
3741 if (get_user(val, (int __user *) optval))
3742 return -EFAULT;
3743
3744 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
3745
3746 return 0;
3747 }
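
/* Example (user space): a sketch of enabling both ancillary-data options so
 * that recvmsg() delivers SCTP_RCVINFO and SCTP_NXTINFO control messages,
 * assuming 'sd' is an SCTP socket:
 *
 *	int on = 1;
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_RECVRCVINFO, &on, sizeof(on)) < 0)
 *		perror("SCTP_RECVRCVINFO");
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_RECVNXTINFO, &on, sizeof(on)) < 0)
 *		perror("SCTP_RECVNXTINFO");
 */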
3748
3749 static int sctp_setsockopt_pr_supported(struct sock *sk,
3750 char __user *optval,
3751 unsigned int optlen)
3752 {
3753 struct sctp_assoc_value params;
3754 struct sctp_association *asoc;
3755 int retval = -EINVAL;
3756
3757 if (optlen != sizeof(params))
3758 goto out;
3759
3760 if (copy_from_user(&params, optval, optlen)) {
3761 retval = -EFAULT;
3762 goto out;
3763 }
3764
3765 asoc = sctp_id2assoc(sk, params.assoc_id);
3766 if (asoc) {
3767 asoc->prsctp_enable = !!params.assoc_value;
3768 } else if (!params.assoc_id) {
3769 struct sctp_sock *sp = sctp_sk(sk);
3770
3771 sp->ep->prsctp_enable = !!params.assoc_value;
3772 } else {
3773 goto out;
3774 }
3775
3776 retval = 0;
3777
3778 out:
3779 return retval;
3780 }
3781
3782 static int sctp_setsockopt_default_prinfo(struct sock *sk,
3783 char __user *optval,
3784 unsigned int optlen)
3785 {
3786 struct sctp_default_prinfo info;
3787 struct sctp_association *asoc;
3788 int retval = -EINVAL;
3789
3790 if (optlen != sizeof(info))
3791 goto out;
3792
3793 if (copy_from_user(&info, optval, sizeof(info))) {
3794 retval = -EFAULT;
3795 goto out;
3796 }
3797
3798 if (info.pr_policy & ~SCTP_PR_SCTP_MASK)
3799 goto out;
3800
3801 if (info.pr_policy == SCTP_PR_SCTP_NONE)
3802 info.pr_value = 0;
3803
3804 asoc = sctp_id2assoc(sk, info.pr_assoc_id);
3805 if (asoc) {
3806 SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
3807 asoc->default_timetolive = info.pr_value;
3808 } else if (!info.pr_assoc_id) {
3809 struct sctp_sock *sp = sctp_sk(sk);
3810
3811 SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
3812 sp->default_timetolive = info.pr_value;
3813 } else {
3814 goto out;
3815 }
3816
3817 retval = 0;
3818
3819 out:
3820 return retval;
3821 }
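
/* Example (user space): a sketch of enabling PR-SCTP on the endpoint and then
 * making "expire after 3000 ms" the default partial-reliability policy for
 * future associations, assuming 'sd' is an SCTP socket:
 *
 *	struct sctp_assoc_value av = {
 *		.assoc_id = 0,			// endpoint / future associations
 *		.assoc_value = 1,
 *	};
 *	struct sctp_default_prinfo info = {
 *		.pr_policy = SCTP_PR_SCTP_TTL,
 *		.pr_value = 3000,		// lifetime in milliseconds
 *		.pr_assoc_id = 0,
 *	};
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PR_SUPPORTED, &av, sizeof(av)) < 0)
 *		perror("SCTP_PR_SUPPORTED");
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_PRINFO,
 *		       &info, sizeof(info)) < 0)
 *		perror("SCTP_DEFAULT_PRINFO");
 */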
3822
3823 static int sctp_setsockopt_reconfig_supported(struct sock *sk,
3824 char __user *optval,
3825 unsigned int optlen)
3826 {
3827 struct sctp_assoc_value params;
3828 struct sctp_association *asoc;
3829 int retval = -EINVAL;
3830
3831 if (optlen != sizeof(params))
3832 goto out;
3833
3834 if (copy_from_user(&params, optval, optlen)) {
3835 retval = -EFAULT;
3836 goto out;
3837 }
3838
3839 asoc = sctp_id2assoc(sk, params.assoc_id);
3840 if (asoc) {
3841 asoc->reconf_enable = !!params.assoc_value;
3842 } else if (!params.assoc_id) {
3843 struct sctp_sock *sp = sctp_sk(sk);
3844
3845 sp->ep->reconf_enable = !!params.assoc_value;
3846 } else {
3847 goto out;
3848 }
3849
3850 retval = 0;
3851
3852 out:
3853 return retval;
3854 }
3855
3856 static int sctp_setsockopt_enable_strreset(struct sock *sk,
3857 char __user *optval,
3858 unsigned int optlen)
3859 {
3860 struct sctp_assoc_value params;
3861 struct sctp_association *asoc;
3862 int retval = -EINVAL;
3863
3864 if (optlen != sizeof(params))
3865 goto out;
3866
3867 if (copy_from_user(&params, optval, optlen)) {
3868 retval = -EFAULT;
3869 goto out;
3870 }
3871
3872 if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
3873 goto out;
3874
3875 asoc = sctp_id2assoc(sk, params.assoc_id);
3876 if (asoc) {
3877 asoc->strreset_enable = params.assoc_value;
3878 } else if (!params.assoc_id) {
3879 struct sctp_sock *sp = sctp_sk(sk);
3880
3881 sp->ep->strreset_enable = params.assoc_value;
3882 } else {
3883 goto out;
3884 }
3885
3886 retval = 0;
3887
3888 out:
3889 return retval;
3890 }
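
/* Example (user space): a sketch of allowing stream and association reset
 * requests on future associations, assuming 'sd' is an SCTP socket and the
 * SCTP_ENABLE_RESET_STREAM_REQ / SCTP_ENABLE_RESET_ASSOC_REQ request flags
 * from the uapi header (the bits covered by SCTP_ENABLE_STRRESET_MASK above):
 *
 *	struct sctp_assoc_value av = {
 *		.assoc_id = 0,
 *		.assoc_value = SCTP_ENABLE_RESET_STREAM_REQ |
 *			       SCTP_ENABLE_RESET_ASSOC_REQ,
 *	};
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_ENABLE_STREAM_RESET,
 *		       &av, sizeof(av)) < 0)
 *		perror("SCTP_ENABLE_STREAM_RESET");
 */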
3891
3892 static int sctp_setsockopt_reset_streams(struct sock *sk,
3893 char __user *optval,
3894 unsigned int optlen)
3895 {
3896 struct sctp_reset_streams *params;
3897 struct sctp_association *asoc;
3898 int retval = -EINVAL;
3899
3900 if (optlen < sizeof(*params))
3901 return -EINVAL;
3902 /* srs_number_streams is u16, so optlen can't be bigger than this. */
3903 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3904 sizeof(__u16) * sizeof(*params));
3905
3906 params = memdup_user(optval, optlen);
3907 if (IS_ERR(params))
3908 return PTR_ERR(params);
3909
3910 if (params->srs_number_streams * sizeof(__u16) >
3911 optlen - sizeof(*params))
3912 goto out;
3913
3914 asoc = sctp_id2assoc(sk, params->srs_assoc_id);
3915 if (!asoc)
3916 goto out;
3917
3918 retval = sctp_send_reset_streams(asoc, params);
3919
3920 out:
3921 kfree(params);
3922 return retval;
3923 }
3924
3925 static int sctp_setsockopt_reset_assoc(struct sock *sk,
3926 char __user *optval,
3927 unsigned int optlen)
3928 {
3929 struct sctp_association *asoc;
3930 sctp_assoc_t associd;
3931 int retval = -EINVAL;
3932
3933 if (optlen != sizeof(associd))
3934 goto out;
3935
3936 if (copy_from_user(&associd, optval, optlen)) {
3937 retval = -EFAULT;
3938 goto out;
3939 }
3940
3941 asoc = sctp_id2assoc(sk, associd);
3942 if (!asoc)
3943 goto out;
3944
3945 retval = sctp_send_reset_assoc(asoc);
3946
3947 out:
3948 return retval;
3949 }
3950
3951 static int sctp_setsockopt_add_streams(struct sock *sk,
3952 char __user *optval,
3953 unsigned int optlen)
3954 {
3955 struct sctp_association *asoc;
3956 struct sctp_add_streams params;
3957 int retval = -EINVAL;
3958
3959 if (optlen != sizeof(params))
3960 goto out;
3961
3962 if (copy_from_user(&params, optval, optlen)) {
3963 retval = -EFAULT;
3964 goto out;
3965 }
3966
3967 asoc = sctp_id2assoc(sk, params.sas_assoc_id);
3968 if (!asoc)
3969 goto out;
3970
3971 retval = sctp_send_add_streams(asoc, &params);
3972
3973 out:
3974 return retval;
3975 }
3976
3977 /* API 6.2 setsockopt(), getsockopt()
3978 *
3979 * Applications use setsockopt() and getsockopt() to set or retrieve
3980 * socket options. Socket options are used to change the default
3981  * behavior of socket calls. They are described in Section 7.
3982 *
3983 * The syntax is:
3984 *
3985 * ret = getsockopt(int sd, int level, int optname, void __user *optval,
3986 * int __user *optlen);
3987 * ret = setsockopt(int sd, int level, int optname, const void __user *optval,
3988 * int optlen);
3989 *
3990  * sd      - the socket descriptor.
3991 * level - set to IPPROTO_SCTP for all SCTP options.
3992 * optname - the option name.
3993 * optval - the buffer to store the value of the option.
3994 * optlen - the size of the buffer.
3995 */
3996 static int sctp_setsockopt(struct sock *sk, int level, int optname,
3997 char __user *optval, unsigned int optlen)
3998 {
3999 int retval = 0;
4000
4001 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
4002
4003 /* I can hardly begin to describe how wrong this is. This is
4004 * so broken as to be worse than useless. The API draft
4005 * REALLY is NOT helpful here... I am not convinced that the
4006 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
4007 * are at all well-founded.
4008 */
4009 if (level != SOL_SCTP) {
4010 struct sctp_af *af = sctp_sk(sk)->pf->af;
4011 retval = af->setsockopt(sk, level, optname, optval, optlen);
4012 goto out_nounlock;
4013 }
4014
4015 lock_sock(sk);
4016
4017 switch (optname) {
4018 case SCTP_SOCKOPT_BINDX_ADD:
4019 /* 'optlen' is the size of the addresses buffer. */
4020 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
4021 optlen, SCTP_BINDX_ADD_ADDR);
4022 break;
4023
4024 case SCTP_SOCKOPT_BINDX_REM:
4025 /* 'optlen' is the size of the addresses buffer. */
4026 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
4027 optlen, SCTP_BINDX_REM_ADDR);
4028 break;
4029
4030 case SCTP_SOCKOPT_CONNECTX_OLD:
4031 /* 'optlen' is the size of the addresses buffer. */
4032 retval = sctp_setsockopt_connectx_old(sk,
4033 (struct sockaddr __user *)optval,
4034 optlen);
4035 break;
4036
4037 case SCTP_SOCKOPT_CONNECTX:
4038 /* 'optlen' is the size of the addresses buffer. */
4039 retval = sctp_setsockopt_connectx(sk,
4040 (struct sockaddr __user *)optval,
4041 optlen);
4042 break;
4043
4044 case SCTP_DISABLE_FRAGMENTS:
4045 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
4046 break;
4047
4048 case SCTP_EVENTS:
4049 retval = sctp_setsockopt_events(sk, optval, optlen);
4050 break;
4051
4052 case SCTP_AUTOCLOSE:
4053 retval = sctp_setsockopt_autoclose(sk, optval, optlen);
4054 break;
4055
4056 case SCTP_PEER_ADDR_PARAMS:
4057 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
4058 break;
4059
4060 case SCTP_DELAYED_SACK:
4061 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
4062 break;
4063 case SCTP_PARTIAL_DELIVERY_POINT:
4064 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
4065 break;
4066
4067 case SCTP_INITMSG:
4068 retval = sctp_setsockopt_initmsg(sk, optval, optlen);
4069 break;
4070 case SCTP_DEFAULT_SEND_PARAM:
4071 retval = sctp_setsockopt_default_send_param(sk, optval,
4072 optlen);
4073 break;
4074 case SCTP_DEFAULT_SNDINFO:
4075 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
4076 break;
4077 case SCTP_PRIMARY_ADDR:
4078 retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
4079 break;
4080 case SCTP_SET_PEER_PRIMARY_ADDR:
4081 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
4082 break;
4083 case SCTP_NODELAY:
4084 retval = sctp_setsockopt_nodelay(sk, optval, optlen);
4085 break;
4086 case SCTP_RTOINFO:
4087 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
4088 break;
4089 case SCTP_ASSOCINFO:
4090 retval = sctp_setsockopt_associnfo(sk, optval, optlen);
4091 break;
4092 case SCTP_I_WANT_MAPPED_V4_ADDR:
4093 retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
4094 break;
4095 case SCTP_MAXSEG:
4096 retval = sctp_setsockopt_maxseg(sk, optval, optlen);
4097 break;
4098 case SCTP_ADAPTATION_LAYER:
4099 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
4100 break;
4101 case SCTP_CONTEXT:
4102 retval = sctp_setsockopt_context(sk, optval, optlen);
4103 break;
4104 case SCTP_FRAGMENT_INTERLEAVE:
4105 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
4106 break;
4107 case SCTP_MAX_BURST:
4108 retval = sctp_setsockopt_maxburst(sk, optval, optlen);
4109 break;
4110 case SCTP_AUTH_CHUNK:
4111 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
4112 break;
4113 case SCTP_HMAC_IDENT:
4114 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
4115 break;
4116 case SCTP_AUTH_KEY:
4117 retval = sctp_setsockopt_auth_key(sk, optval, optlen);
4118 break;
4119 case SCTP_AUTH_ACTIVE_KEY:
4120 retval = sctp_setsockopt_active_key(sk, optval, optlen);
4121 break;
4122 case SCTP_AUTH_DELETE_KEY:
4123 retval = sctp_setsockopt_del_key(sk, optval, optlen);
4124 break;
4125 case SCTP_AUTO_ASCONF:
4126 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
4127 break;
4128 case SCTP_PEER_ADDR_THLDS:
4129 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
4130 break;
4131 case SCTP_RECVRCVINFO:
4132 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
4133 break;
4134 case SCTP_RECVNXTINFO:
4135 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
4136 break;
4137 case SCTP_PR_SUPPORTED:
4138 retval = sctp_setsockopt_pr_supported(sk, optval, optlen);
4139 break;
4140 case SCTP_DEFAULT_PRINFO:
4141 retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
4142 break;
4143 case SCTP_RECONFIG_SUPPORTED:
4144 retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
4145 break;
4146 case SCTP_ENABLE_STREAM_RESET:
4147 retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
4148 break;
4149 case SCTP_RESET_STREAMS:
4150 retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
4151 break;
4152 case SCTP_RESET_ASSOC:
4153 retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
4154 break;
4155 case SCTP_ADD_STREAMS:
4156 retval = sctp_setsockopt_add_streams(sk, optval, optlen);
4157 break;
4158 default:
4159 retval = -ENOPROTOOPT;
4160 break;
4161 }
4162
4163 release_sock(sk);
4164
4165 out_nounlock:
4166 return retval;
4167 }
4168
4169 /* API 3.1.6 connect() - UDP Style Syntax
4170 *
4171 * An application may use the connect() call in the UDP model to initiate an
4172 * association without sending data.
4173 *
4174 * The syntax is:
4175 *
4176 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
4177 *
4178 * sd: the socket descriptor to have a new association added to.
4179 *
4180 * nam: the address structure (either struct sockaddr_in or struct
4181 * sockaddr_in6 defined in RFC2553 [7]).
4182 *
4183 * len: the size of the address.
4184 */
4185 static int sctp_connect(struct sock *sk, struct sockaddr *addr,
4186 int addr_len)
4187 {
4188 int err = 0;
4189 struct sctp_af *af;
4190
4191 lock_sock(sk);
4192
4193 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
4194 addr, addr_len);
4195
4196 /* Validate addr_len before calling common connect/connectx routine. */
4197 af = sctp_get_af_specific(addr->sa_family);
4198 if (!af || addr_len < af->sockaddr_len) {
4199 err = -EINVAL;
4200 } else {
4201 /* Pass correct addr len to common routine (so it knows there
4202 		 * is only one address being passed).
4203 */
4204 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
4205 }
4206
4207 release_sock(sk);
4208 return err;
4209 }
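
/* Example (user space): a sketch of the UDP-style connect() described above,
 * assuming the peer listens on the documentation address 192.0.2.1, port
 * 5000 (both illustrative values):
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in peer;
 *
 *	memset(&peer, 0, sizeof(peer));
 *	peer.sin_family = AF_INET;
 *	peer.sin_port = htons(5000);
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *
 *	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *		perror("connect");
 */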
4210
4211 /* FIXME: Write comments. */
4212 static int sctp_disconnect(struct sock *sk, int flags)
4213 {
4214 return -EOPNOTSUPP; /* STUB */
4215 }
4216
4217 /* 4.1.4 accept() - TCP Style Syntax
4218 *
4219  * Applications use the accept() call to remove an established SCTP
4220 * association from the accept queue of the endpoint. A new socket
4221 * descriptor will be returned from accept() to represent the newly
4222 * formed association.
4223 */
4224 static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
4225 {
4226 struct sctp_sock *sp;
4227 struct sctp_endpoint *ep;
4228 struct sock *newsk = NULL;
4229 struct sctp_association *asoc;
4230 long timeo;
4231 int error = 0;
4232
4233 lock_sock(sk);
4234
4235 sp = sctp_sk(sk);
4236 ep = sp->ep;
4237
4238 if (!sctp_style(sk, TCP)) {
4239 error = -EOPNOTSUPP;
4240 goto out;
4241 }
4242
4243 if (!sctp_sstate(sk, LISTENING)) {
4244 error = -EINVAL;
4245 goto out;
4246 }
4247
4248 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
4249
4250 error = sctp_wait_for_accept(sk, timeo);
4251 if (error)
4252 goto out;
4253
4254 /* We treat the list of associations on the endpoint as the accept
4255 * queue and pick the first association on the list.
4256 */
4257 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
4258
4259 newsk = sp->pf->create_accept_sk(sk, asoc, kern);
4260 if (!newsk) {
4261 error = -ENOMEM;
4262 goto out;
4263 }
4264
4265 /* Populate the fields of the newsk from the oldsk and migrate the
4266 * asoc to the newsk.
4267 */
4268 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
4269
4270 out:
4271 release_sock(sk);
4272 *err = error;
4273 return newsk;
4274 }
4275
4276 /* The SCTP ioctl handler. */
4277 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
4278 {
4279 int rc = -ENOTCONN;
4280
4281 lock_sock(sk);
4282
4283 /*
4284 	 * SEQPACKET-style sockets in LISTENING state are valid for SCTP,
4285 	 * so only discard TCP-style sockets in LISTENING state.
4286 */
4287 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
4288 goto out;
4289
4290 switch (cmd) {
4291 case SIOCINQ: {
4292 struct sk_buff *skb;
4293 unsigned int amount = 0;
4294
4295 skb = skb_peek(&sk->sk_receive_queue);
4296 if (skb != NULL) {
4297 /*
4298 * We will only return the amount of this packet since
4299 * that is all that will be read.
4300 */
4301 amount = skb->len;
4302 }
4303 rc = put_user(amount, (int __user *)arg);
4304 break;
4305 }
4306 default:
4307 rc = -ENOIOCTLCMD;
4308 break;
4309 }
4310 out:
4311 release_sock(sk);
4312 return rc;
4313 }
4314
4315 /* This is the function which gets called during socket creation to
4316  * initialize the SCTP-specific portion of the sock.
4317 * The sock structure should already be zero-filled memory.
4318 */
4319 static int sctp_init_sock(struct sock *sk)
4320 {
4321 struct net *net = sock_net(sk);
4322 struct sctp_sock *sp;
4323
4324 pr_debug("%s: sk:%p\n", __func__, sk);
4325
4326 sp = sctp_sk(sk);
4327
4328 /* Initialize the SCTP per socket area. */
4329 switch (sk->sk_type) {
4330 case SOCK_SEQPACKET:
4331 sp->type = SCTP_SOCKET_UDP;
4332 break;
4333 case SOCK_STREAM:
4334 sp->type = SCTP_SOCKET_TCP;
4335 break;
4336 default:
4337 return -ESOCKTNOSUPPORT;
4338 }
4339
4340 sk->sk_gso_type = SKB_GSO_SCTP;
4341
4342 /* Initialize default send parameters. These parameters can be
4343 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
4344 */
4345 sp->default_stream = 0;
4346 sp->default_ppid = 0;
4347 sp->default_flags = 0;
4348 sp->default_context = 0;
4349 sp->default_timetolive = 0;
4350
4351 sp->default_rcv_context = 0;
4352 sp->max_burst = net->sctp.max_burst;
4353
4354 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
4355
4356 /* Initialize default setup parameters. These parameters
4357 * can be modified with the SCTP_INITMSG socket option or
4358 * overridden by the SCTP_INIT CMSG.
4359 */
4360 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
4361 sp->initmsg.sinit_max_instreams = sctp_max_instreams;
4362 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
4363 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
4364
4365 /* Initialize default RTO related parameters. These parameters can
4366 	 * be modified with the SCTP_RTOINFO socket option.
4367 */
4368 sp->rtoinfo.srto_initial = net->sctp.rto_initial;
4369 sp->rtoinfo.srto_max = net->sctp.rto_max;
4370 sp->rtoinfo.srto_min = net->sctp.rto_min;
4371
4372 /* Initialize default association related parameters. These parameters
4373 * can be modified with the SCTP_ASSOCINFO socket option.
4374 */
4375 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
4376 sp->assocparams.sasoc_number_peer_destinations = 0;
4377 sp->assocparams.sasoc_peer_rwnd = 0;
4378 sp->assocparams.sasoc_local_rwnd = 0;
4379 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
4380
4381 /* Initialize default event subscriptions. By default, all the
4382 * options are off.
4383 */
4384 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
4385
4386 /* Default Peer Address Parameters. These defaults can
4387 * be modified via SCTP_PEER_ADDR_PARAMS
4388 */
4389 sp->hbinterval = net->sctp.hb_interval;
4390 sp->pathmaxrxt = net->sctp.max_retrans_path;
4391 sp->pathmtu = 0; /* allow default discovery */
4392 sp->sackdelay = net->sctp.sack_timeout;
4393 sp->sackfreq = 2;
4394 sp->param_flags = SPP_HB_ENABLE |
4395 SPP_PMTUD_ENABLE |
4396 SPP_SACKDELAY_ENABLE;
4397
4398 /* If enabled no SCTP message fragmentation will be performed.
4399 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
4400 */
4401 sp->disable_fragments = 0;
4402
4403 /* Enable Nagle algorithm by default. */
4404 sp->nodelay = 0;
4405
4406 sp->recvrcvinfo = 0;
4407 sp->recvnxtinfo = 0;
4408
4409 /* Enable by default. */
4410 sp->v4mapped = 1;
4411
4412 /* Auto-close idle associations after the configured
4413 * number of seconds. A value of 0 disables this
4414 * feature. Configure through the SCTP_AUTOCLOSE socket option,
4415 * for UDP-style sockets only.
4416 */
4417 sp->autoclose = 0;
4418
4419 /* User specified fragmentation limit. */
4420 sp->user_frag = 0;
4421
4422 sp->adaptation_ind = 0;
4423
4424 sp->pf = sctp_get_pf_specific(sk->sk_family);
4425
4426 /* Control variables for partial data delivery. */
4427 atomic_set(&sp->pd_mode, 0);
4428 skb_queue_head_init(&sp->pd_lobby);
4429 sp->frag_interleave = 0;
4430
4431 /* Create a per socket endpoint structure. Even if we
4432 * change the data structure relationships, this may still
4433 * be useful for storing pre-connect address information.
4434 */
4435 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
4436 if (!sp->ep)
4437 return -ENOMEM;
4438
4439 sp->hmac = NULL;
4440
4441 sk->sk_destruct = sctp_destruct_sock;
4442
4443 SCTP_DBG_OBJCNT_INC(sock);
4444
4445 local_bh_disable();
4446 sk_sockets_allocated_inc(sk);
4447 sock_prot_inuse_add(net, sk->sk_prot, 1);
4448
4449 /* Nothing can fail after this block, otherwise
4450 * sctp_destroy_sock() will be called without addr_wq_lock held
4451 */
4452 if (net->sctp.default_auto_asconf) {
4453 spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
4454 list_add_tail(&sp->auto_asconf_list,
4455 &net->sctp.auto_asconf_splist);
4456 sp->do_auto_asconf = 1;
4457 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
4458 } else {
4459 sp->do_auto_asconf = 0;
4460 }
4461
4462 local_bh_enable();
4463
4464 return 0;
4465 }
4466
4467 /* Cleanup any SCTP per socket resources. Must be called with
4468 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
4469 */
4470 static void sctp_destroy_sock(struct sock *sk)
4471 {
4472 struct sctp_sock *sp;
4473
4474 pr_debug("%s: sk:%p\n", __func__, sk);
4475
4476 /* Release our hold on the endpoint. */
4477 sp = sctp_sk(sk);
4478 /* This could happen during socket init, thus we bail out
4479 * early, since the rest of the below is not setup either.
4480 */
4481 if (sp->ep == NULL)
4482 return;
4483
4484 if (sp->do_auto_asconf) {
4485 sp->do_auto_asconf = 0;
4486 list_del(&sp->auto_asconf_list);
4487 }
4488 sctp_endpoint_free(sp->ep);
4489 local_bh_disable();
4490 sk_sockets_allocated_dec(sk);
4491 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
4492 local_bh_enable();
4493 }
4494
4495 /* Triggered when there are no references on the socket anymore */
4496 static void sctp_destruct_sock(struct sock *sk)
4497 {
4498 struct sctp_sock *sp = sctp_sk(sk);
4499
4500 /* Free up the HMAC transform. */
4501 crypto_free_shash(sp->hmac);
4502
4503 inet_sock_destruct(sk);
4504 }
4505
4506 /* API 4.1.7 shutdown() - TCP Style Syntax
4507 * int shutdown(int socket, int how);
4508 *
4509 * sd - the socket descriptor of the association to be closed.
4510 * how - Specifies the type of shutdown. The values are
4511 * as follows:
4512 * SHUT_RD
4513 * Disables further receive operations. No SCTP
4514 * protocol action is taken.
4515 * SHUT_WR
4516 * Disables further send operations, and initiates
4517 * the SCTP shutdown sequence.
4518 * SHUT_RDWR
4519 * Disables further send and receive operations
4520 * and initiates the SCTP shutdown sequence.
4521 */
4522 static void sctp_shutdown(struct sock *sk, int how)
4523 {
4524 struct net *net = sock_net(sk);
4525 struct sctp_endpoint *ep;
4526
4527 if (!sctp_style(sk, TCP))
4528 return;
4529
4530 ep = sctp_sk(sk)->ep;
4531 if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
4532 struct sctp_association *asoc;
4533
4534 sk->sk_state = SCTP_SS_CLOSING;
4535 asoc = list_entry(ep->asocs.next,
4536 struct sctp_association, asocs);
4537 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4538 }
4539 }
4540
4541 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
4542 struct sctp_info *info)
4543 {
4544 struct sctp_transport *prim;
4545 struct list_head *pos;
4546 int mask;
4547
4548 memset(info, 0, sizeof(*info));
4549 if (!asoc) {
4550 struct sctp_sock *sp = sctp_sk(sk);
4551
4552 info->sctpi_s_autoclose = sp->autoclose;
4553 info->sctpi_s_adaptation_ind = sp->adaptation_ind;
4554 info->sctpi_s_pd_point = sp->pd_point;
4555 info->sctpi_s_nodelay = sp->nodelay;
4556 info->sctpi_s_disable_fragments = sp->disable_fragments;
4557 info->sctpi_s_v4mapped = sp->v4mapped;
4558 info->sctpi_s_frag_interleave = sp->frag_interleave;
4559 info->sctpi_s_type = sp->type;
4560
4561 return 0;
4562 }
4563
4564 info->sctpi_tag = asoc->c.my_vtag;
4565 info->sctpi_state = asoc->state;
4566 info->sctpi_rwnd = asoc->a_rwnd;
4567 info->sctpi_unackdata = asoc->unack_data;
4568 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4569 info->sctpi_instrms = asoc->stream.incnt;
4570 info->sctpi_outstrms = asoc->stream.outcnt;
4571 list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
4572 info->sctpi_inqueue++;
4573 list_for_each(pos, &asoc->outqueue.out_chunk_list)
4574 info->sctpi_outqueue++;
4575 info->sctpi_overall_error = asoc->overall_error_count;
4576 info->sctpi_max_burst = asoc->max_burst;
4577 info->sctpi_maxseg = asoc->frag_point;
4578 info->sctpi_peer_rwnd = asoc->peer.rwnd;
4579 info->sctpi_peer_tag = asoc->c.peer_vtag;
4580
4581 mask = asoc->peer.ecn_capable << 1;
4582 mask = (mask | asoc->peer.ipv4_address) << 1;
4583 mask = (mask | asoc->peer.ipv6_address) << 1;
4584 mask = (mask | asoc->peer.hostname_address) << 1;
4585 mask = (mask | asoc->peer.asconf_capable) << 1;
4586 mask = (mask | asoc->peer.prsctp_capable) << 1;
4587 mask = (mask | asoc->peer.auth_capable);
4588 info->sctpi_peer_capable = mask;
4589 mask = asoc->peer.sack_needed << 1;
4590 mask = (mask | asoc->peer.sack_generation) << 1;
4591 mask = (mask | asoc->peer.zero_window_announced);
4592 info->sctpi_peer_sack = mask;
4593
4594 info->sctpi_isacks = asoc->stats.isacks;
4595 info->sctpi_osacks = asoc->stats.osacks;
4596 info->sctpi_opackets = asoc->stats.opackets;
4597 info->sctpi_ipackets = asoc->stats.ipackets;
4598 info->sctpi_rtxchunks = asoc->stats.rtxchunks;
4599 info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
4600 info->sctpi_idupchunks = asoc->stats.idupchunks;
4601 info->sctpi_gapcnt = asoc->stats.gapcnt;
4602 info->sctpi_ouodchunks = asoc->stats.ouodchunks;
4603 info->sctpi_iuodchunks = asoc->stats.iuodchunks;
4604 info->sctpi_oodchunks = asoc->stats.oodchunks;
4605 info->sctpi_iodchunks = asoc->stats.iodchunks;
4606 info->sctpi_octrlchunks = asoc->stats.octrlchunks;
4607 info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
4608
4609 prim = asoc->peer.primary_path;
4610 memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
4611 info->sctpi_p_state = prim->state;
4612 info->sctpi_p_cwnd = prim->cwnd;
4613 info->sctpi_p_srtt = prim->srtt;
4614 info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
4615 info->sctpi_p_hbinterval = prim->hbinterval;
4616 info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
4617 info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
4618 info->sctpi_p_ssthresh = prim->ssthresh;
4619 info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
4620 info->sctpi_p_flight_size = prim->flight_size;
4621 info->sctpi_p_error = prim->error_count;
4622
4623 return 0;
4624 }
4625 EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
4626
4627 /* use callback to avoid exporting the core structure */
4628 int sctp_transport_walk_start(struct rhashtable_iter *iter)
4629 {
4630 int err;
4631
4632 rhltable_walk_enter(&sctp_transport_hashtable, iter);
4633
4634 err = rhashtable_walk_start(iter);
4635 if (err && err != -EAGAIN) {
4636 rhashtable_walk_stop(iter);
4637 rhashtable_walk_exit(iter);
4638 return err;
4639 }
4640
4641 return 0;
4642 }
4643
4644 void sctp_transport_walk_stop(struct rhashtable_iter *iter)
4645 {
4646 rhashtable_walk_stop(iter);
4647 rhashtable_walk_exit(iter);
4648 }
4649
4650 struct sctp_transport *sctp_transport_get_next(struct net *net,
4651 struct rhashtable_iter *iter)
4652 {
4653 struct sctp_transport *t;
4654
4655 t = rhashtable_walk_next(iter);
4656 for (; t; t = rhashtable_walk_next(iter)) {
4657 if (IS_ERR(t)) {
4658 if (PTR_ERR(t) == -EAGAIN)
4659 continue;
4660 break;
4661 }
4662
4663 if (!sctp_transport_hold(t))
4664 continue;
4665
4666 if (net_eq(sock_net(t->asoc->base.sk), net) &&
4667 t->asoc->peer.primary_path == t)
4668 break;
4669
4670 sctp_transport_put(t);
4671 }
4672
4673 return t;
4674 }
4675
4676 struct sctp_transport *sctp_transport_get_idx(struct net *net,
4677 struct rhashtable_iter *iter,
4678 int pos)
4679 {
4680 struct sctp_transport *t;
4681
4682 if (!pos)
4683 return SEQ_START_TOKEN;
4684
4685 while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
4686 if (!--pos)
4687 break;
4688 sctp_transport_put(t);
4689 }
4690
4691 return t;
4692 }
4693
4694 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
4695 void *p) {
4696 int err = 0;
4697 int hash = 0;
4698 struct sctp_ep_common *epb;
4699 struct sctp_hashbucket *head;
4700
4701 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
4702 hash++, head++) {
4703 read_lock_bh(&head->lock);
4704 sctp_for_each_hentry(epb, &head->chain) {
4705 err = cb(sctp_ep(epb), p);
4706 if (err)
4707 break;
4708 }
4709 read_unlock_bh(&head->lock);
4710 }
4711
4712 return err;
4713 }
4714 EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
4715
4716 int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
4717 struct net *net,
4718 const union sctp_addr *laddr,
4719 const union sctp_addr *paddr, void *p)
4720 {
4721 struct sctp_transport *transport;
4722 int err;
4723
4724 rcu_read_lock();
4725 transport = sctp_addrs_lookup_transport(net, laddr, paddr);
4726 rcu_read_unlock();
4727 if (!transport)
4728 return -ENOENT;
4729
4730 err = cb(transport, p);
4731 sctp_transport_put(transport);
4732
4733 return err;
4734 }
4735 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
4736
4737 int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
4738 int (*cb_done)(struct sctp_transport *, void *),
4739 struct net *net, int *pos, void *p) {
4740 struct rhashtable_iter hti;
4741 struct sctp_transport *tsp;
4742 int ret;
4743
4744 again:
4745 ret = sctp_transport_walk_start(&hti);
4746 if (ret)
4747 return ret;
4748
4749 tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
4750 for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
4751 ret = cb(tsp, p);
4752 if (ret)
4753 break;
4754 (*pos)++;
4755 sctp_transport_put(tsp);
4756 }
4757 sctp_transport_walk_stop(&hti);
4758
4759 if (ret) {
4760 if (cb_done && !cb_done(tsp, p)) {
4761 (*pos)++;
4762 sctp_transport_put(tsp);
4763 goto again;
4764 }
4765 sctp_transport_put(tsp);
4766 }
4767
4768 return ret;
4769 }
4770 EXPORT_SYMBOL_GPL(sctp_for_each_transport);
4771
4772 /* 7.2.1 Association Status (SCTP_STATUS)
4773  *
4774 * Applications can retrieve current status information about an
4775 * association, including association state, peer receiver window size,
4776 * number of unacked data chunks, and number of data chunks pending
4777 * receipt. This information is read-only.
4778 */
4779 static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
4780 char __user *optval,
4781 int __user *optlen)
4782 {
4783 struct sctp_status status;
4784 struct sctp_association *asoc = NULL;
4785 struct sctp_transport *transport;
4786 sctp_assoc_t associd;
4787 int retval = 0;
4788
4789 if (len < sizeof(status)) {
4790 retval = -EINVAL;
4791 goto out;
4792 }
4793
4794 len = sizeof(status);
4795 if (copy_from_user(&status, optval, len)) {
4796 retval = -EFAULT;
4797 goto out;
4798 }
4799
4800 associd = status.sstat_assoc_id;
4801 asoc = sctp_id2assoc(sk, associd);
4802 if (!asoc) {
4803 retval = -EINVAL;
4804 goto out;
4805 }
4806
4807 transport = asoc->peer.primary_path;
4808
4809 status.sstat_assoc_id = sctp_assoc2id(asoc);
4810 status.sstat_state = sctp_assoc_to_state(asoc);
4811 status.sstat_rwnd = asoc->peer.rwnd;
4812 status.sstat_unackdata = asoc->unack_data;
4813
4814 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4815 status.sstat_instrms = asoc->stream.incnt;
4816 status.sstat_outstrms = asoc->stream.outcnt;
4817 status.sstat_fragmentation_point = asoc->frag_point;
4818 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4819 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
4820 transport->af_specific->sockaddr_len);
4821 /* Map ipv4 address into v4-mapped-on-v6 address. */
4822 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
4823 (union sctp_addr *)&status.sstat_primary.spinfo_address);
4824 status.sstat_primary.spinfo_state = transport->state;
4825 status.sstat_primary.spinfo_cwnd = transport->cwnd;
4826 status.sstat_primary.spinfo_srtt = transport->srtt;
4827 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
4828 status.sstat_primary.spinfo_mtu = transport->pathmtu;
4829
4830 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
4831 status.sstat_primary.spinfo_state = SCTP_ACTIVE;
4832
4833 if (put_user(len, optlen)) {
4834 retval = -EFAULT;
4835 goto out;
4836 }
4837
4838 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
4839 __func__, len, status.sstat_state, status.sstat_rwnd,
4840 status.sstat_assoc_id);
4841
4842 if (copy_to_user(optval, &status, len)) {
4843 retval = -EFAULT;
4844 goto out;
4845 }
4846
4847 out:
4848 return retval;
4849 }
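
/* Example (user space): a sketch of querying SCTP_STATUS for one association,
 * assuming 'sd' is an SCTP socket and 'assoc_id' identifies the association
 * (the id is ignored on a one-to-one socket):
 *
 *	struct sctp_status status;
 *	socklen_t len = sizeof(status);
 *
 *	memset(&status, 0, sizeof(status));
 *	status.sstat_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
 *		printf("state %d rwnd %u unacked %u\n",
 *		       status.sstat_state, status.sstat_rwnd,
 *		       (unsigned int)status.sstat_unackdata);
 */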
4850
4851
4852 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
4853 *
4854 * Applications can retrieve information about a specific peer address
4855 * of an association, including its reachability state, congestion
4856 * window, and retransmission timer values. This information is
4857 * read-only.
4858 */
4859 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
4860 char __user *optval,
4861 int __user *optlen)
4862 {
4863 struct sctp_paddrinfo pinfo;
4864 struct sctp_transport *transport;
4865 int retval = 0;
4866
4867 if (len < sizeof(pinfo)) {
4868 retval = -EINVAL;
4869 goto out;
4870 }
4871
4872 len = sizeof(pinfo);
4873 if (copy_from_user(&pinfo, optval, len)) {
4874 retval = -EFAULT;
4875 goto out;
4876 }
4877
4878 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
4879 pinfo.spinfo_assoc_id);
4880 if (!transport)
4881 return -EINVAL;
4882
4883 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4884 pinfo.spinfo_state = transport->state;
4885 pinfo.spinfo_cwnd = transport->cwnd;
4886 pinfo.spinfo_srtt = transport->srtt;
4887 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
4888 pinfo.spinfo_mtu = transport->pathmtu;
4889
4890 if (pinfo.spinfo_state == SCTP_UNKNOWN)
4891 pinfo.spinfo_state = SCTP_ACTIVE;
4892
4893 if (put_user(len, optlen)) {
4894 retval = -EFAULT;
4895 goto out;
4896 }
4897
4898 if (copy_to_user(optval, &pinfo, len)) {
4899 retval = -EFAULT;
4900 goto out;
4901 }
4902
4903 out:
4904 return retval;
4905 }
4906
4907 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
4908 *
4909  * This option is an on/off flag. If enabled no SCTP message
4910 * fragmentation will be performed. Instead if a message being sent
4911 * exceeds the current PMTU size, the message will NOT be sent and
4912  * instead an error will be indicated to the user.
4913 */
4914 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4915 char __user *optval, int __user *optlen)
4916 {
4917 int val;
4918
4919 if (len < sizeof(int))
4920 return -EINVAL;
4921
4922 len = sizeof(int);
4923 val = (sctp_sk(sk)->disable_fragments == 1);
4924 if (put_user(len, optlen))
4925 return -EFAULT;
4926 if (copy_to_user(optval, &val, len))
4927 return -EFAULT;
4928 return 0;
4929 }
4930
4931 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
4932 *
4933 * This socket option is used to specify various notifications and
4934 * ancillary data the user wishes to receive.
4935 */
4936 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4937 int __user *optlen)
4938 {
4939 if (len == 0)
4940 return -EINVAL;
4941 if (len > sizeof(struct sctp_event_subscribe))
4942 len = sizeof(struct sctp_event_subscribe);
4943 if (put_user(len, optlen))
4944 return -EFAULT;
4945 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
4946 return -EFAULT;
4947 return 0;
4948 }
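
/* Example (user space): a sketch of subscribing to per-message SNDRCV info
 * and association change notifications, assuming 'sd' is an SCTP socket:
 *
 *	struct sctp_event_subscribe events;
 *
 *	memset(&events, 0, sizeof(events));
 *	events.sctp_data_io_event = 1;		// SCTP_SNDRCV cmsg on recvmsg()
 *	events.sctp_association_event = 1;	// SCTP_ASSOC_CHANGE notifications
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
 *		       &events, sizeof(events)) < 0)
 *		perror("SCTP_EVENTS");
 */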
4949
4950 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
4951 *
4952 * This socket option is applicable to the UDP-style socket only. When
4953 * set it will cause associations that are idle for more than the
4954 * specified number of seconds to automatically close. An association
4955  * being idle is defined as an association that has NOT sent or received
4956 * user data. The special value of '0' indicates that no automatic
4957 * close of any associations should be performed. The option expects an
4958 * integer defining the number of seconds of idle time before an
4959 * association is closed.
4960 */
4961 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
4962 {
4963 /* Applicable to UDP-style socket only */
4964 if (sctp_style(sk, TCP))
4965 return -EOPNOTSUPP;
4966 if (len < sizeof(int))
4967 return -EINVAL;
4968 len = sizeof(int);
4969 if (put_user(len, optlen))
4970 return -EFAULT;
4971 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
4972 return -EFAULT;
4973 return 0;
4974 }
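
/* Example (user space): a sketch of asking the stack to close associations
 * that stay idle for two minutes, assuming 'sd' is a UDP-style (one-to-many)
 * SCTP socket; as noted above, the option applies to UDP-style sockets only:
 *
 *	int idle_secs = 120;
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *		       &idle_secs, sizeof(idle_secs)) < 0)
 *		perror("SCTP_AUTOCLOSE");
 */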
4975
4976 /* Helper routine to branch off an association to a new socket. */
4977 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4978 {
4979 struct sctp_association *asoc = sctp_id2assoc(sk, id);
4980 struct sctp_sock *sp = sctp_sk(sk);
4981 struct socket *sock;
4982 int err = 0;
4983
4984 /* Do not peel off from one netns to another one. */
4985 if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
4986 return -EINVAL;
4987
4988 if (!asoc)
4989 return -EINVAL;
4990
4991 /* An association cannot be branched off from an already peeled-off
4992 * socket, nor is this supported for tcp style sockets.
4993 */
4994 if (!sctp_style(sk, UDP))
4995 return -EINVAL;
4996
4997 /* Create a new socket. */
4998 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
4999 if (err < 0)
5000 return err;
5001
5002 sctp_copy_sock(sock->sk, sk, asoc);
5003
5004 /* Make peeled-off sockets more like 1-1 accepted sockets.
5005 * Set the daddr and initialize id to something more random
5006 */
5007 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
5008
5009 /* Populate the fields of the newsk from the oldsk and migrate the
5010 * asoc to the newsk.
5011 */
5012 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
5013
5014 *sockp = sock;
5015
5016 return err;
5017 }
5018 EXPORT_SYMBOL(sctp_do_peeloff);
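
/* Example (user space): sctp_do_peeloff() backs the SCTP_SOCKOPT_PEELOFF
 * getsockopt, which lksctp-tools wraps as sctp_peeloff().  A sketch, assuming
 * 'sd' is a one-to-many SCTP socket and 'assoc_id' an established
 * association on it:
 *
 *	int peeled = sctp_peeloff(sd, assoc_id);
 *
 *	if (peeled < 0)
 *		perror("sctp_peeloff");
 *
 * The returned descriptor behaves like a one-to-one socket carrying only that
 * association, while 'sd' keeps the remaining associations.
 */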
5019
5020 static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
5021 struct file **newfile, unsigned flags)
5022 {
5023 struct socket *newsock;
5024 int retval;
5025
5026 retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
5027 if (retval < 0)
5028 goto out;
5029
5030 /* Map the socket to an unused fd that can be returned to the user. */
5031 retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
5032 if (retval < 0) {
5033 sock_release(newsock);
5034 goto out;
5035 }
5036
5037 *newfile = sock_alloc_file(newsock, 0, NULL);
5038 if (IS_ERR(*newfile)) {
5039 put_unused_fd(retval);
5040 sock_release(newsock);
5041 retval = PTR_ERR(*newfile);
5042 *newfile = NULL;
5043 return retval;
5044 }
5045
5046 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
5047 retval);
5048
5049 peeloff->sd = retval;
5050
5051 if (flags & SOCK_NONBLOCK)
5052 (*newfile)->f_flags |= O_NONBLOCK;
5053 out:
5054 return retval;
5055 }
5056
5057 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
5058 {
5059 sctp_peeloff_arg_t peeloff;
5060 struct file *newfile = NULL;
5061 int retval = 0;
5062
5063 if (len < sizeof(sctp_peeloff_arg_t))
5064 return -EINVAL;
5065 len = sizeof(sctp_peeloff_arg_t);
5066 if (copy_from_user(&peeloff, optval, len))
5067 return -EFAULT;
5068
5069 retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
5070 if (retval < 0)
5071 goto out;
5072
5073 /* Return the fd mapped to the new socket. */
5074 if (put_user(len, optlen)) {
5075 fput(newfile);
5076 put_unused_fd(retval);
5077 return -EFAULT;
5078 }
5079
5080 if (copy_to_user(optval, &peeloff, len)) {
5081 fput(newfile);
5082 put_unused_fd(retval);
5083 return -EFAULT;
5084 }
5085 fd_install(retval, newfile);
5086 out:
5087 return retval;
5088 }
5089
5090 static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
5091 char __user *optval, int __user *optlen)
5092 {
5093 sctp_peeloff_flags_arg_t peeloff;
5094 struct file *newfile = NULL;
5095 int retval = 0;
5096
5097 if (len < sizeof(sctp_peeloff_flags_arg_t))
5098 return -EINVAL;
5099 len = sizeof(sctp_peeloff_flags_arg_t);
5100 if (copy_from_user(&peeloff, optval, len))
5101 return -EFAULT;
5102
5103 retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
5104 &newfile, peeloff.flags);
5105 if (retval < 0)
5106 goto out;
5107
5108 /* Return the fd mapped to the new socket. */
5109 if (put_user(len, optlen)) {
5110 fput(newfile);
5111 put_unused_fd(retval);
5112 return -EFAULT;
5113 }
5114
5115 if (copy_to_user(optval, &peeloff, len)) {
5116 fput(newfile);
5117 put_unused_fd(retval);
5118 return -EFAULT;
5119 }
5120 fd_install(retval, newfile);
5121 out:
5122 return retval;
5123 }
5124
5125 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
5126 *
5127 * Applications can enable or disable heartbeats for any peer address of
5128 * an association, modify an address's heartbeat interval, force a
5129 * heartbeat to be sent immediately, and adjust the address's maximum
5130 * number of retransmissions sent before an address is considered
5131 * unreachable. The following structure is used to access and modify an
5132 * address's parameters:
5133 *
5134 * struct sctp_paddrparams {
5135 * sctp_assoc_t spp_assoc_id;
5136 * struct sockaddr_storage spp_address;
5137 * uint32_t spp_hbinterval;
5138 * uint16_t spp_pathmaxrxt;
5139 * uint32_t spp_pathmtu;
5140 * uint32_t spp_sackdelay;
5141 * uint32_t spp_flags;
5142 * };
5143 *
5144  * spp_assoc_id    - (one-to-many style socket) This is filled in by the
5145 * application, and identifies the association for
5146 * this query.
5147 * spp_address - This specifies which address is of interest.
5148 * spp_hbinterval - This contains the value of the heartbeat interval,
5149 * in milliseconds. If a value of zero
5150 * is present in this field then no changes are to
5151 * be made to this parameter.
5152 * spp_pathmaxrxt - This contains the maximum number of
5153 * retransmissions before this address shall be
5154 * considered unreachable. If a value of zero
5155 * is present in this field then no changes are to
5156 * be made to this parameter.
5157 * spp_pathmtu - When Path MTU discovery is disabled the value
5158 * specified here will be the "fixed" path mtu.
5159 * Note that if the spp_address field is empty
5160 * then all associations on this address will
5161 * have this fixed path mtu set upon them.
5162 *
5163 * spp_sackdelay - When delayed sack is enabled, this value specifies
5164 * the number of milliseconds that sacks will be delayed
5165 * for. This value will apply to all addresses of an
5166 * association if the spp_address field is empty. Note
5167 * also, that if delayed sack is enabled and this
5168 * value is set to 0, no change is made to the last
5169 * recorded delayed sack timer value.
5170 *
5171 * spp_flags - These flags are used to control various features
5172 * on an association. The flag field may contain
5173 * zero or more of the following options.
5174 *
5175 * SPP_HB_ENABLE - Enable heartbeats on the
5176 * specified address. Note that if the address
5177 * field is empty all addresses for the association
5178 * have heartbeats enabled upon them.
5179 *
5180 * SPP_HB_DISABLE - Disable heartbeats on the
5181  * specified address. Note that if the address
5182 * field is empty all addresses for the association
5183 * will have their heartbeats disabled. Note also
5184 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
5185 * mutually exclusive, only one of these two should
5186 * be specified. Enabling both fields will have
5187 * undetermined results.
5188 *
5189 * SPP_HB_DEMAND - Request a user initiated heartbeat
5190 * to be made immediately.
5191 *
5192 * SPP_PMTUD_ENABLE - This field will enable PMTU
5193 * discovery upon the specified address. Note that
5194  * if the address field is empty then all addresses
5195  * on the association are affected.
5196 *
5197 * SPP_PMTUD_DISABLE - This field will disable PMTU
5198 * discovery upon the specified address. Note that
5199  * if the address field is empty then all addresses
5200  * on the association are affected. Note also that
5201 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
5202 * exclusive. Enabling both will have undetermined
5203 * results.
5204 *
5205 * SPP_SACKDELAY_ENABLE - Setting this flag turns
5206 * on delayed sack. The time specified in spp_sackdelay
5207 * is used to specify the sack delay for this address. Note
5208 * that if spp_address is empty then all addresses will
5209 * enable delayed sack and take on the sack delay
5210 * value specified in spp_sackdelay.
5211 * SPP_SACKDELAY_DISABLE - Setting this flag turns
5212 * off delayed sack. If the spp_address field is blank then
5213 * delayed sack is disabled for the entire association. Note
5214 * also that this field is mutually exclusive to
5215 * SPP_SACKDELAY_ENABLE, setting both will have undefined
5216 * results.
5217 */
5218 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
5219 char __user *optval, int __user *optlen)
5220 {
5221 struct sctp_paddrparams params;
5222 struct sctp_transport *trans = NULL;
5223 struct sctp_association *asoc = NULL;
5224 struct sctp_sock *sp = sctp_sk(sk);
5225
5226 if (len < sizeof(struct sctp_paddrparams))
5227 return -EINVAL;
5228 len = sizeof(struct sctp_paddrparams);
5229 if (copy_from_user(&params, optval, len))
5230 return -EFAULT;
5231
5232 /* If an address other than INADDR_ANY is specified, and
5233 * no transport is found, then the request is invalid.
5234 */
5235 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
5236 trans = sctp_addr_id2transport(sk, &params.spp_address,
5237 params.spp_assoc_id);
5238 if (!trans) {
5239 pr_debug("%s: failed no transport\n", __func__);
5240 return -EINVAL;
5241 }
5242 }
5243
5244 /* Get association, if assoc_id != 0 and the socket is a one
5245 * to many style socket, and an association was not found, then
5246 * the id was invalid.
5247 */
5248 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
5249 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
5250 pr_debug("%s: failed no association\n", __func__);
5251 return -EINVAL;
5252 }
5253
5254 if (trans) {
5255 /* Fetch transport values. */
5256 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
5257 params.spp_pathmtu = trans->pathmtu;
5258 params.spp_pathmaxrxt = trans->pathmaxrxt;
5259 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
5260
5261 /*draft-11 doesn't say what to return in spp_flags*/
5262 params.spp_flags = trans->param_flags;
5263 } else if (asoc) {
5264 /* Fetch association values. */
5265 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
5266 params.spp_pathmtu = asoc->pathmtu;
5267 params.spp_pathmaxrxt = asoc->pathmaxrxt;
5268 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
5269
5270 /*draft-11 doesn't say what to return in spp_flags*/
5271 params.spp_flags = asoc->param_flags;
5272 } else {
5273 /* Fetch socket values. */
5274 params.spp_hbinterval = sp->hbinterval;
5275 params.spp_pathmtu = sp->pathmtu;
5276 params.spp_sackdelay = sp->sackdelay;
5277 params.spp_pathmaxrxt = sp->pathmaxrxt;
5278
5279 /*draft-11 doesn't say what to return in spp_flags*/
5280 params.spp_flags = sp->param_flags;
5281 }
5282
5283 if (copy_to_user(optval, &params, len))
5284 return -EFAULT;
5285
5286 if (put_user(len, optlen))
5287 return -EFAULT;
5288
5289 return 0;
5290 }
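
/* Example (user space): a sketch of reading the association-level defaults
 * (heartbeat interval, path max retransmit) by leaving spp_address zeroed,
 * assuming 'sd' is an SCTP socket and 'assoc_id' an existing association:
 *
 *	struct sctp_paddrparams params;
 *	socklen_t len = sizeof(params);
 *
 *	memset(&params, 0, sizeof(params));
 *	params.spp_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &params, &len) == 0)
 *		printf("hb %u ms, pathmaxrxt %u\n", params.spp_hbinterval,
 *		       (unsigned int)params.spp_pathmaxrxt);
 */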
5291
5292 /*
5293 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
5294 *
5295 * This option will effect the way delayed acks are performed. This
5296  * This option will affect the way delayed acks are performed. This
5297 * milliseconds. It also allows changing the delayed ack frequency.
5298 * Changing the frequency to 1 disables the delayed sack algorithm. If
5299 * the assoc_id is 0, then this sets or gets the endpoints default
5300  * the assoc_id is 0, then this sets or gets the endpoint's default
5301 * effects the specified association for the one to many model (the
5302  * affects the specified association for the one to many model (the
5303 * sack_delay or sack_freq are 0 when setting this option, then the
5304 * current values will remain unchanged.
5305 *
5306 * struct sctp_sack_info {
5307 * sctp_assoc_t sack_assoc_id;
5308 * uint32_t sack_delay;
5309 * uint32_t sack_freq;
5310 * };
5311 *
5312  * sack_assoc_id - This parameter indicates which association the user
5313 * is performing an action upon. Note that if this field's value is
5314  * zero then the endpoint's default value is changed (affecting future
5315 * associations only).
5316 *
5317 * sack_delay - This parameter contains the number of milliseconds that
5318 * the user is requesting the delayed ACK timer be set to. Note that
5319 * this value is defined in the standard to be between 200 and 500
5320 * milliseconds.
5321 *
5322 * sack_freq - This parameter contains the number of packets that must
5323 * be received before a sack is sent without waiting for the delay
5324 * timer to expire. The default value for this is 2, setting this
5325 * value to 1 will disable the delayed sack algorithm.
5326 */
5327 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
5328 char __user *optval,
5329 int __user *optlen)
5330 {
5331 struct sctp_sack_info params;
5332 struct sctp_association *asoc = NULL;
5333 struct sctp_sock *sp = sctp_sk(sk);
5334
5335 if (len >= sizeof(struct sctp_sack_info)) {
5336 len = sizeof(struct sctp_sack_info);
5337
5338 if (copy_from_user(&params, optval, len))
5339 return -EFAULT;
5340 } else if (len == sizeof(struct sctp_assoc_value)) {
5341 pr_warn_ratelimited(DEPRECATED
5342 "%s (pid %d) "
5343 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
5344 "Use struct sctp_sack_info instead\n",
5345 current->comm, task_pid_nr(current));
5346 if (copy_from_user(&params, optval, len))
5347 return -EFAULT;
5348 } else
5349 return -EINVAL;
5350
5351 /* Get association, if sack_assoc_id != 0 and the socket is a one
5352 * to many style socket, and an association was not found, then
5353 * the id was invalid.
5354 */
5355 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
5356 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
5357 return -EINVAL;
5358
5359 if (asoc) {
5360 /* Fetch association values. */
5361 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
5362 params.sack_delay = jiffies_to_msecs(
5363 asoc->sackdelay);
5364 params.sack_freq = asoc->sackfreq;
5365
5366 } else {
5367 params.sack_delay = 0;
5368 params.sack_freq = 1;
5369 }
5370 } else {
5371 /* Fetch socket values. */
5372 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
5373 params.sack_delay = sp->sackdelay;
5374 params.sack_freq = sp->sackfreq;
5375 } else {
5376 params.sack_delay = 0;
5377 params.sack_freq = 1;
5378 }
5379 }
5380
5381 if (copy_to_user(optval, &params, len))
5382 return -EFAULT;
5383
5384 if (put_user(len, optlen))
5385 return -EFAULT;
5386
5387 return 0;
5388 }
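
/* Example (user space): a sketch of reading the endpoint's delayed SACK
 * settings, assuming 'sd' is an SCTP socket:
 *
 *	struct sctp_sack_info sack;
 *	socklen_t len = sizeof(sack);
 *
 *	memset(&sack, 0, sizeof(sack));
 *	sack.sack_assoc_id = 0;		// 0 = endpoint defaults
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &sack, &len) == 0)
 *		printf("sack delay %u ms, freq %u\n",
 *		       sack.sack_delay, sack.sack_freq);
 */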
5389
5390 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
5391 *
5392 * Applications can specify protocol parameters for the default association
5393 * initialization. The option name argument to setsockopt() and getsockopt()
5394 * is SCTP_INITMSG.
5395 *
5396 * Setting initialization parameters is effective only on an unconnected
5397  * socket (for UDP-style sockets only future associations are affected
5398 * by the change). With TCP-style sockets, this option is inherited by
5399 * sockets derived from a listener socket.
5400 */
5401 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
5402 {
5403 if (len < sizeof(struct sctp_initmsg))
5404 return -EINVAL;
5405 len = sizeof(struct sctp_initmsg);
5406 if (put_user(len, optlen))
5407 return -EFAULT;
5408 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
5409 return -EFAULT;
5410 return 0;
5411 }
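/*
 * A minimal userspace sketch of reading the endpoint's default INIT
 * parameters (not kernel code; "sd" is illustrative, <netinet/sctp.h> from
 * lksctp-tools is assumed):
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static void show_initmsg(int sd)
 *	{
 *		struct sctp_initmsg im;
 *		socklen_t len = sizeof(im);
 *
 *		if (!getsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, &len))
 *			printf("ostreams=%u instreams=%u attempts=%u timeo=%u\n",
 *			       im.sinit_num_ostreams, im.sinit_max_instreams,
 *			       im.sinit_max_attempts, im.sinit_max_init_timeo);
 *	}
 */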
5412
5413
5414 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
5415 char __user *optval, int __user *optlen)
5416 {
5417 struct sctp_association *asoc;
5418 int cnt = 0;
5419 struct sctp_getaddrs getaddrs;
5420 struct sctp_transport *from;
5421 void __user *to;
5422 union sctp_addr temp;
5423 struct sctp_sock *sp = sctp_sk(sk);
5424 int addrlen;
5425 size_t space_left;
5426 int bytes_copied;
5427
5428 if (len < sizeof(struct sctp_getaddrs))
5429 return -EINVAL;
5430
5431 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
5432 return -EFAULT;
5433
5434 /* For UDP-style sockets, id specifies the association to query. */
5435 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
5436 if (!asoc)
5437 return -EINVAL;
5438
5439 to = optval + offsetof(struct sctp_getaddrs, addrs);
5440 space_left = len - offsetof(struct sctp_getaddrs, addrs);
5441
5442 list_for_each_entry(from, &asoc->peer.transport_addr_list,
5443 transports) {
5444 memcpy(&temp, &from->ipaddr, sizeof(temp));
5445 addrlen = sctp_get_pf_specific(sk->sk_family)
5446 ->addr_to_user(sp, &temp);
5447 if (space_left < addrlen)
5448 return -ENOMEM;
5449 if (copy_to_user(to, &temp, addrlen))
5450 return -EFAULT;
5451 to += addrlen;
5452 cnt++;
5453 space_left -= addrlen;
5454 }
5455
5456 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
5457 return -EFAULT;
5458 bytes_copied = ((char __user *)to) - optval;
5459 if (put_user(bytes_copied, optlen))
5460 return -EFAULT;
5461
5462 return 0;
5463 }
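/*
 * A minimal userspace sketch of listing the peer's transport addresses (not
 * kernel code; "sd", "id" and the 4 KB buffer size are illustrative choices,
 * and <netinet/sctp.h> from lksctp-tools is assumed -- its sctp_getpaddrs()
 * helper wraps this same option):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int count_peer_addrs(int sd, sctp_assoc_t id)
 *	{
 *		union {
 *			struct sctp_getaddrs ga;
 *			char buf[4096];		// room for the packed sockaddrs
 *		} u;
 *		socklen_t len = sizeof(u);
 *
 *		memset(&u, 0, sizeof(u));
 *		u.ga.assoc_id = id;		// must name an existing association
 *		if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS, &u, &len))
 *			return -1;
 *		return u.ga.addr_num;		// packed sockaddrs follow in u.ga.addrs
 *	}
 */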
5464
5465 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
5466 size_t space_left, int *bytes_copied)
5467 {
5468 struct sctp_sockaddr_entry *addr;
5469 union sctp_addr temp;
5470 int cnt = 0;
5471 int addrlen;
5472 struct net *net = sock_net(sk);
5473
5474 rcu_read_lock();
5475 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
5476 if (!addr->valid)
5477 continue;
5478
5479 if ((PF_INET == sk->sk_family) &&
5480 (AF_INET6 == addr->a.sa.sa_family))
5481 continue;
5482 if ((PF_INET6 == sk->sk_family) &&
5483 inet_v6_ipv6only(sk) &&
5484 (AF_INET == addr->a.sa.sa_family))
5485 continue;
5486 memcpy(&temp, &addr->a, sizeof(temp));
5487 if (!temp.v4.sin_port)
5488 temp.v4.sin_port = htons(port);
5489
5490 addrlen = sctp_get_pf_specific(sk->sk_family)
5491 ->addr_to_user(sctp_sk(sk), &temp);
5492
5493 if (space_left < addrlen) {
5494 cnt = -ENOMEM;
5495 break;
5496 }
5497 memcpy(to, &temp, addrlen);
5498
5499 to += addrlen;
5500 cnt++;
5501 space_left -= addrlen;
5502 *bytes_copied += addrlen;
5503 }
5504 rcu_read_unlock();
5505
5506 return cnt;
5507 }
5508
5509
5510 static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
5511 char __user *optval, int __user *optlen)
5512 {
5513 struct sctp_bind_addr *bp;
5514 struct sctp_association *asoc;
5515 int cnt = 0;
5516 struct sctp_getaddrs getaddrs;
5517 struct sctp_sockaddr_entry *addr;
5518 void __user *to;
5519 union sctp_addr temp;
5520 struct sctp_sock *sp = sctp_sk(sk);
5521 int addrlen;
5522 int err = 0;
5523 size_t space_left;
5524 int bytes_copied = 0;
5525 void *addrs;
5526 void *buf;
5527
5528 if (len < sizeof(struct sctp_getaddrs))
5529 return -EINVAL;
5530
5531 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
5532 return -EFAULT;
5533
5534 /*
5535 * For UDP-style sockets, id specifies the association to query.
5536 * If the id field is set to the value '0' then the locally bound
5537 * addresses are returned without regard to any particular
5538 * association.
5539 */
5540 if (0 == getaddrs.assoc_id) {
5541 bp = &sctp_sk(sk)->ep->base.bind_addr;
5542 } else {
5543 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
5544 if (!asoc)
5545 return -EINVAL;
5546 bp = &asoc->base.bind_addr;
5547 }
5548
5549 to = optval + offsetof(struct sctp_getaddrs, addrs);
5550 space_left = len - offsetof(struct sctp_getaddrs, addrs);
5551
5552 addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
5553 if (!addrs)
5554 return -ENOMEM;
5555
5556 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
5557 * addresses from the global local address list.
5558 */
5559 if (sctp_list_single_entry(&bp->address_list)) {
5560 addr = list_entry(bp->address_list.next,
5561 struct sctp_sockaddr_entry, list);
5562 if (sctp_is_any(sk, &addr->a)) {
5563 cnt = sctp_copy_laddrs(sk, bp->port, addrs,
5564 space_left, &bytes_copied);
5565 if (cnt < 0) {
5566 err = cnt;
5567 goto out;
5568 }
5569 goto copy_getaddrs;
5570 }
5571 }
5572
5573 buf = addrs;
5574 /* Protection on the bound address list is not needed since
5575 * in the socket option context we hold a socket lock and
5576 * thus the bound address list can't change.
5577 */
5578 list_for_each_entry(addr, &bp->address_list, list) {
5579 memcpy(&temp, &addr->a, sizeof(temp));
5580 addrlen = sctp_get_pf_specific(sk->sk_family)
5581 ->addr_to_user(sp, &temp);
5582 if (space_left < addrlen) {
5583 			err = -ENOMEM; /* FIXME: right error? */
5584 goto out;
5585 }
5586 memcpy(buf, &temp, addrlen);
5587 buf += addrlen;
5588 bytes_copied += addrlen;
5589 cnt++;
5590 space_left -= addrlen;
5591 }
5592
5593 copy_getaddrs:
5594 if (copy_to_user(to, addrs, bytes_copied)) {
5595 err = -EFAULT;
5596 goto out;
5597 }
5598 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
5599 err = -EFAULT;
5600 goto out;
5601 }
5602 /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
5603 * but we can't change it anymore.
5604 */
5605 if (put_user(bytes_copied, optlen))
5606 err = -EFAULT;
5607 out:
5608 kfree(addrs);
5609 return err;
5610 }
5611
5612 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
5613 *
5614 * Requests that the local SCTP stack use the enclosed peer address as
5615 * the association primary. The enclosed address must be one of the
5616 * association peer's addresses.
5617 */
5618 static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
5619 char __user *optval, int __user *optlen)
5620 {
5621 struct sctp_prim prim;
5622 struct sctp_association *asoc;
5623 struct sctp_sock *sp = sctp_sk(sk);
5624
5625 if (len < sizeof(struct sctp_prim))
5626 return -EINVAL;
5627
5628 len = sizeof(struct sctp_prim);
5629
5630 if (copy_from_user(&prim, optval, len))
5631 return -EFAULT;
5632
5633 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
5634 if (!asoc)
5635 return -EINVAL;
5636
5637 if (!asoc->peer.primary_path)
5638 return -ENOTCONN;
5639
5640 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
5641 asoc->peer.primary_path->af_specific->sockaddr_len);
5642
5643 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
5644 (union sctp_addr *)&prim.ssp_addr);
5645
5646 if (put_user(len, optlen))
5647 return -EFAULT;
5648 if (copy_to_user(optval, &prim, len))
5649 return -EFAULT;
5650
5651 return 0;
5652 }
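/*
 * A minimal userspace sketch of reading the current primary path (not kernel
 * code; "sd" and "id" are illustrative, <netinet/sctp.h> from lksctp-tools
 * is assumed):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static void show_primary(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_prim prim;
 *		socklen_t len = sizeof(prim);
 *
 *		memset(&prim, 0, sizeof(prim));
 *		prim.ssp_assoc_id = id;
 *		if (!getsockopt(sd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, &len))
 *			printf("primary address family=%d\n",
 *			       ((struct sockaddr *)&prim.ssp_addr)->sa_family);
 *	}
 */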
5653
5654 /*
5655 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
5656 *
5657 * Requests that the local endpoint set the specified Adaptation Layer
5658 * Indication parameter for all future INIT and INIT-ACK exchanges.
5659 */
5660 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
5661 char __user *optval, int __user *optlen)
5662 {
5663 struct sctp_setadaptation adaptation;
5664
5665 if (len < sizeof(struct sctp_setadaptation))
5666 return -EINVAL;
5667
5668 len = sizeof(struct sctp_setadaptation);
5669
5670 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
5671
5672 if (put_user(len, optlen))
5673 return -EFAULT;
5674 if (copy_to_user(optval, &adaptation, len))
5675 return -EFAULT;
5676
5677 return 0;
5678 }
5679
5680 /*
5681 *
5682 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
5683 *
5684 * Applications that wish to use the sendto() system call may wish to
5685 * specify a default set of parameters that would normally be supplied
5686 * through the inclusion of ancillary data. This socket option allows
5687 * such an application to set the default sctp_sndrcvinfo structure.
5688  *
5690  * The application that wishes to use this socket option simply passes
5691  * in to this call the sctp_sndrcvinfo structure defined in Section
5692  * 5.2.2.  The input parameters accepted by this call include
5693  * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context and
5694  * sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
5695  * to this call if the caller is using the UDP model.
5696  *
5697  * For getsockopt, it gets the default sctp_sndrcvinfo structure.
5698 */
5699 static int sctp_getsockopt_default_send_param(struct sock *sk,
5700 int len, char __user *optval,
5701 int __user *optlen)
5702 {
5703 struct sctp_sock *sp = sctp_sk(sk);
5704 struct sctp_association *asoc;
5705 struct sctp_sndrcvinfo info;
5706
5707 if (len < sizeof(info))
5708 return -EINVAL;
5709
5710 len = sizeof(info);
5711
5712 if (copy_from_user(&info, optval, len))
5713 return -EFAULT;
5714
5715 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
5716 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
5717 return -EINVAL;
5718 if (asoc) {
5719 info.sinfo_stream = asoc->default_stream;
5720 info.sinfo_flags = asoc->default_flags;
5721 info.sinfo_ppid = asoc->default_ppid;
5722 info.sinfo_context = asoc->default_context;
5723 info.sinfo_timetolive = asoc->default_timetolive;
5724 } else {
5725 info.sinfo_stream = sp->default_stream;
5726 info.sinfo_flags = sp->default_flags;
5727 info.sinfo_ppid = sp->default_ppid;
5728 info.sinfo_context = sp->default_context;
5729 info.sinfo_timetolive = sp->default_timetolive;
5730 }
5731
5732 if (put_user(len, optlen))
5733 return -EFAULT;
5734 if (copy_to_user(optval, &info, len))
5735 return -EFAULT;
5736
5737 return 0;
5738 }
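/*
 * A minimal userspace sketch of reading the default send parameters (not
 * kernel code; "sd" and "id" are illustrative, <netinet/sctp.h> from
 * lksctp-tools is assumed):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static void show_default_send_param(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_sndrcvinfo info;
 *		socklen_t len = sizeof(info);
 *
 *		memset(&info, 0, sizeof(info));
 *		info.sinfo_assoc_id = id;	// needed for the UDP (one-to-many) model
 *		if (!getsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *				&info, &len))
 *			printf("stream=%u ppid=%u timetolive=%u\n",
 *			       info.sinfo_stream, info.sinfo_ppid,
 *			       info.sinfo_timetolive);
 *	}
 */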
5739
5740 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
5741 * (SCTP_DEFAULT_SNDINFO)
5742 */
5743 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
5744 char __user *optval,
5745 int __user *optlen)
5746 {
5747 struct sctp_sock *sp = sctp_sk(sk);
5748 struct sctp_association *asoc;
5749 struct sctp_sndinfo info;
5750
5751 if (len < sizeof(info))
5752 return -EINVAL;
5753
5754 len = sizeof(info);
5755
5756 if (copy_from_user(&info, optval, len))
5757 return -EFAULT;
5758
5759 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
5760 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
5761 return -EINVAL;
5762 if (asoc) {
5763 info.snd_sid = asoc->default_stream;
5764 info.snd_flags = asoc->default_flags;
5765 info.snd_ppid = asoc->default_ppid;
5766 info.snd_context = asoc->default_context;
5767 } else {
5768 info.snd_sid = sp->default_stream;
5769 info.snd_flags = sp->default_flags;
5770 info.snd_ppid = sp->default_ppid;
5771 info.snd_context = sp->default_context;
5772 }
5773
5774 if (put_user(len, optlen))
5775 return -EFAULT;
5776 if (copy_to_user(optval, &info, len))
5777 return -EFAULT;
5778
5779 return 0;
5780 }
5781
5782 /*
5783 *
5784 * 7.1.5 SCTP_NODELAY
5785 *
5786 * Turn on/off any Nagle-like algorithm. This means that packets are
5787 * generally sent as soon as possible and no unnecessary delays are
5788 * introduced, at the cost of more packets in the network. Expects an
5789 * integer boolean flag.
5790 */
5791
5792 static int sctp_getsockopt_nodelay(struct sock *sk, int len,
5793 char __user *optval, int __user *optlen)
5794 {
5795 int val;
5796
5797 if (len < sizeof(int))
5798 return -EINVAL;
5799
5800 len = sizeof(int);
5801 val = (sctp_sk(sk)->nodelay == 1);
5802 if (put_user(len, optlen))
5803 return -EFAULT;
5804 if (copy_to_user(optval, &val, len))
5805 return -EFAULT;
5806 return 0;
5807 }
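/*
 * A minimal userspace sketch (not kernel code; "sd" is illustrative,
 * <netinet/sctp.h> from lksctp-tools is assumed):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int sctp_nodelay_enabled(int sd)
 *	{
 *		int val = 0;
 *		socklen_t len = sizeof(val);
 *
 *		if (getsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &val, &len))
 *			return -1;
 *		return val;	// 1: Nagle-like delays are disabled
 *	}
 */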
5808
5809 /*
5810 *
5811 * 7.1.1 SCTP_RTOINFO
5812 *
5813 * The protocol parameters used to initialize and bound retransmission
5814 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
5815 * and modify these parameters.
5816 * All parameters are time values, in milliseconds. A value of 0, when
5817 * modifying the parameters, indicates that the current value should not
5818 * be changed.
5819 *
5820 */
5821 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
5822 char __user *optval,
5823 int __user *optlen) {
5824 struct sctp_rtoinfo rtoinfo;
5825 struct sctp_association *asoc;
5826
5827 if (len < sizeof (struct sctp_rtoinfo))
5828 return -EINVAL;
5829
5830 len = sizeof(struct sctp_rtoinfo);
5831
5832 if (copy_from_user(&rtoinfo, optval, len))
5833 return -EFAULT;
5834
5835 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
5836
5837 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
5838 return -EINVAL;
5839
5840 /* Values corresponding to the specific association. */
5841 if (asoc) {
5842 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
5843 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
5844 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
5845 } else {
5846 /* Values corresponding to the endpoint. */
5847 struct sctp_sock *sp = sctp_sk(sk);
5848
5849 rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
5850 rtoinfo.srto_max = sp->rtoinfo.srto_max;
5851 rtoinfo.srto_min = sp->rtoinfo.srto_min;
5852 }
5853
5854 if (put_user(len, optlen))
5855 return -EFAULT;
5856
5857 if (copy_to_user(optval, &rtoinfo, len))
5858 return -EFAULT;
5859
5860 return 0;
5861 }
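/*
 * A minimal userspace sketch of reading the RTO parameters (not kernel code;
 * "sd" and "id" are illustrative, <netinet/sctp.h> from lksctp-tools is
 * assumed):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static void show_rtoinfo(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_rtoinfo rto;
 *		socklen_t len = sizeof(rto);
 *
 *		memset(&rto, 0, sizeof(rto));
 *		rto.srto_assoc_id = id;		// 0: endpoint values
 *		if (!getsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len))
 *			printf("rto initial=%u min=%u max=%u (ms)\n",
 *			       rto.srto_initial, rto.srto_min, rto.srto_max);
 *	}
 */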
5862
5863 /*
5864 *
5865 * 7.1.2 SCTP_ASSOCINFO
5866 *
5867 * This option is used to tune the maximum retransmission attempts
5868 * of the association.
5869 * Returns an error if the new association retransmission value is
5870 * greater than the sum of the retransmission value of the peer.
5871 * See [SCTP] for more information.
5872 *
5873 */
5874 static int sctp_getsockopt_associnfo(struct sock *sk, int len,
5875 char __user *optval,
5876 int __user *optlen)
5877 {
5878
5879 struct sctp_assocparams assocparams;
5880 struct sctp_association *asoc;
5881 struct list_head *pos;
5882 int cnt = 0;
5883
5884 if (len < sizeof (struct sctp_assocparams))
5885 return -EINVAL;
5886
5887 len = sizeof(struct sctp_assocparams);
5888
5889 if (copy_from_user(&assocparams, optval, len))
5890 return -EFAULT;
5891
5892 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
5893
5894 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
5895 return -EINVAL;
5896
5897 	/* Values corresponding to the specific association */
5898 if (asoc) {
5899 assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
5900 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
5901 assocparams.sasoc_local_rwnd = asoc->a_rwnd;
5902 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
5903
5904 list_for_each(pos, &asoc->peer.transport_addr_list) {
5905 cnt++;
5906 }
5907
5908 assocparams.sasoc_number_peer_destinations = cnt;
5909 } else {
5910 /* Values corresponding to the endpoint */
5911 struct sctp_sock *sp = sctp_sk(sk);
5912
5913 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
5914 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
5915 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
5916 assocparams.sasoc_cookie_life =
5917 sp->assocparams.sasoc_cookie_life;
5918 assocparams.sasoc_number_peer_destinations =
5919 sp->assocparams.
5920 sasoc_number_peer_destinations;
5921 }
5922
5923 if (put_user(len, optlen))
5924 return -EFAULT;
5925
5926 if (copy_to_user(optval, &assocparams, len))
5927 return -EFAULT;
5928
5929 return 0;
5930 }
5931
5932 /*
5933 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
5934 *
5935 * This socket option is a boolean flag which turns on or off mapped V4
5936 * addresses. If this option is turned on and the socket is type
5937 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
5938 * If this option is turned off, then no mapping will be done of V4
5939 * addresses and a user will receive both PF_INET6 and PF_INET type
5940 * addresses on the socket.
5941 */
5942 static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
5943 char __user *optval, int __user *optlen)
5944 {
5945 int val;
5946 struct sctp_sock *sp = sctp_sk(sk);
5947
5948 if (len < sizeof(int))
5949 return -EINVAL;
5950
5951 len = sizeof(int);
5952 val = sp->v4mapped;
5953 if (put_user(len, optlen))
5954 return -EFAULT;
5955 if (copy_to_user(optval, &val, len))
5956 return -EFAULT;
5957
5958 return 0;
5959 }
5960
5961 /*
5962 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
5963 * (chapter and verse is quoted at sctp_setsockopt_context())
5964 */
5965 static int sctp_getsockopt_context(struct sock *sk, int len,
5966 char __user *optval, int __user *optlen)
5967 {
5968 struct sctp_assoc_value params;
5969 struct sctp_sock *sp;
5970 struct sctp_association *asoc;
5971
5972 if (len < sizeof(struct sctp_assoc_value))
5973 return -EINVAL;
5974
5975 len = sizeof(struct sctp_assoc_value);
5976
5977 if (copy_from_user(&params, optval, len))
5978 return -EFAULT;
5979
5980 sp = sctp_sk(sk);
5981
5982 if (params.assoc_id != 0) {
5983 asoc = sctp_id2assoc(sk, params.assoc_id);
5984 if (!asoc)
5985 return -EINVAL;
5986 params.assoc_value = asoc->default_rcv_context;
5987 } else {
5988 params.assoc_value = sp->default_rcv_context;
5989 }
5990
5991 if (put_user(len, optlen))
5992 return -EFAULT;
5993 if (copy_to_user(optval, &params, len))
5994 return -EFAULT;
5995
5996 return 0;
5997 }
5998
5999 /*
6000 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
6001 * This option will get or set the maximum size to put in any outgoing
6002 * SCTP DATA chunk. If a message is larger than this size it will be
6003 * fragmented by SCTP into the specified size. Note that the underlying
6004 * SCTP implementation may fragment into smaller sized chunks when the
6005 * PMTU of the underlying association is smaller than the value set by
6006 * the user. The default value for this option is '0' which indicates
6007  * the user is NOT limiting fragmentation and only the PMTU will affect
6008 * SCTP's choice of DATA chunk size. Note also that values set larger
6009 * than the maximum size of an IP datagram will effectively let SCTP
6010 * control fragmentation (i.e. the same as setting this option to 0).
6011 *
6012 * The following structure is used to access and modify this parameter:
6013 *
6014 * struct sctp_assoc_value {
6015 * sctp_assoc_t assoc_id;
6016 * uint32_t assoc_value;
6017 * };
6018 *
6019 * assoc_id: This parameter is ignored for one-to-one style sockets.
6020 * For one-to-many style sockets this parameter indicates which
6021 * association the user is performing an action upon. Note that if
6022  *    this field's value is zero, then the endpoint's default value is
6023  *    changed (affecting future associations only).
6024 * assoc_value: This parameter specifies the maximum size in bytes.
6025 */
6026 static int sctp_getsockopt_maxseg(struct sock *sk, int len,
6027 char __user *optval, int __user *optlen)
6028 {
6029 struct sctp_assoc_value params;
6030 struct sctp_association *asoc;
6031
6032 if (len == sizeof(int)) {
6033 pr_warn_ratelimited(DEPRECATED
6034 "%s (pid %d) "
6035 "Use of int in maxseg socket option.\n"
6036 "Use struct sctp_assoc_value instead\n",
6037 current->comm, task_pid_nr(current));
6038 params.assoc_id = 0;
6039 } else if (len >= sizeof(struct sctp_assoc_value)) {
6040 len = sizeof(struct sctp_assoc_value);
6041 if (copy_from_user(&params, optval, len))
6042 return -EFAULT;
6043 } else
6044 return -EINVAL;
6045
6046 asoc = sctp_id2assoc(sk, params.assoc_id);
6047 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
6048 return -EINVAL;
6049
6050 if (asoc)
6051 params.assoc_value = asoc->frag_point;
6052 else
6053 params.assoc_value = sctp_sk(sk)->user_frag;
6054
6055 if (put_user(len, optlen))
6056 return -EFAULT;
6057 if (len == sizeof(int)) {
6058 if (copy_to_user(optval, &params.assoc_value, len))
6059 return -EFAULT;
6060 } else {
6061 if (copy_to_user(optval, &params, len))
6062 return -EFAULT;
6063 }
6064
6065 return 0;
6066 }
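/*
 * A minimal userspace sketch using the non-deprecated struct form (not
 * kernel code; "sd" and "id" are illustrative, <netinet/sctp.h> from
 * lksctp-tools is assumed):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static void show_maxseg(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_assoc_value av;
 *		socklen_t len = sizeof(av);
 *
 *		memset(&av, 0, sizeof(av));
 *		av.assoc_id = id;
 *		if (!getsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &len))
 *			printf("maxseg=%u bytes (0: limited by PMTU only)\n",
 *			       av.assoc_value);
 *	}
 */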
6067
6068 /*
6069 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
6070 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
6071 */
6072 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
6073 char __user *optval, int __user *optlen)
6074 {
6075 int val;
6076
6077 if (len < sizeof(int))
6078 return -EINVAL;
6079
6080 len = sizeof(int);
6081
6082 val = sctp_sk(sk)->frag_interleave;
6083 if (put_user(len, optlen))
6084 return -EFAULT;
6085 if (copy_to_user(optval, &val, len))
6086 return -EFAULT;
6087
6088 return 0;
6089 }
6090
6091 /*
6092 * 7.1.25. Set or Get the sctp partial delivery point
6093 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
6094 */
6095 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
6096 char __user *optval,
6097 int __user *optlen)
6098 {
6099 u32 val;
6100
6101 if (len < sizeof(u32))
6102 return -EINVAL;
6103
6104 len = sizeof(u32);
6105
6106 val = sctp_sk(sk)->pd_point;
6107 if (put_user(len, optlen))
6108 return -EFAULT;
6109 if (copy_to_user(optval, &val, len))
6110 return -EFAULT;
6111
6112 return 0;
6113 }
6114
6115 /*
6116 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
6117 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
6118 */
6119 static int sctp_getsockopt_maxburst(struct sock *sk, int len,
6120 char __user *optval,
6121 int __user *optlen)
6122 {
6123 struct sctp_assoc_value params;
6124 struct sctp_sock *sp;
6125 struct sctp_association *asoc;
6126
6127 if (len == sizeof(int)) {
6128 pr_warn_ratelimited(DEPRECATED
6129 "%s (pid %d) "
6130 "Use of int in max_burst socket option.\n"
6131 "Use struct sctp_assoc_value instead\n",
6132 current->comm, task_pid_nr(current));
6133 params.assoc_id = 0;
6134 } else if (len >= sizeof(struct sctp_assoc_value)) {
6135 len = sizeof(struct sctp_assoc_value);
6136 if (copy_from_user(&params, optval, len))
6137 return -EFAULT;
6138 } else
6139 return -EINVAL;
6140
6141 sp = sctp_sk(sk);
6142
6143 if (params.assoc_id != 0) {
6144 asoc = sctp_id2assoc(sk, params.assoc_id);
6145 if (!asoc)
6146 return -EINVAL;
6147 params.assoc_value = asoc->max_burst;
6148 } else
6149 params.assoc_value = sp->max_burst;
6150
6151 if (len == sizeof(int)) {
6152 if (copy_to_user(optval, &params.assoc_value, len))
6153 return -EFAULT;
6154 } else {
6155 if (copy_to_user(optval, &params, len))
6156 return -EFAULT;
6157 }
6158
6159 return 0;
6160
6161 }
6162
6163 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
6164 char __user *optval, int __user *optlen)
6165 {
6166 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6167 struct sctp_hmacalgo __user *p = (void __user *)optval;
6168 struct sctp_hmac_algo_param *hmacs;
6169 __u16 data_len = 0;
6170 u32 num_idents;
6171 int i;
6172
6173 if (!ep->auth_enable)
6174 return -EACCES;
6175
6176 hmacs = ep->auth_hmacs_list;
6177 data_len = ntohs(hmacs->param_hdr.length) -
6178 sizeof(struct sctp_paramhdr);
6179
6180 if (len < sizeof(struct sctp_hmacalgo) + data_len)
6181 return -EINVAL;
6182
6183 len = sizeof(struct sctp_hmacalgo) + data_len;
6184 num_idents = data_len / sizeof(u16);
6185
6186 if (put_user(len, optlen))
6187 return -EFAULT;
6188 if (put_user(num_idents, &p->shmac_num_idents))
6189 return -EFAULT;
6190 for (i = 0; i < num_idents; i++) {
6191 __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
6192
6193 if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
6194 return -EFAULT;
6195 }
6196 return 0;
6197 }
6198
6199 static int sctp_getsockopt_active_key(struct sock *sk, int len,
6200 char __user *optval, int __user *optlen)
6201 {
6202 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6203 struct sctp_authkeyid val;
6204 struct sctp_association *asoc;
6205
6206 if (!ep->auth_enable)
6207 return -EACCES;
6208
6209 if (len < sizeof(struct sctp_authkeyid))
6210 return -EINVAL;
6211
6212 len = sizeof(struct sctp_authkeyid);
6213 if (copy_from_user(&val, optval, len))
6214 return -EFAULT;
6215
6216 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
6217 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
6218 return -EINVAL;
6219
6220 if (asoc)
6221 val.scact_keynumber = asoc->active_key_id;
6222 else
6223 val.scact_keynumber = ep->active_key_id;
6224
6225 if (put_user(len, optlen))
6226 return -EFAULT;
6227 if (copy_to_user(optval, &val, len))
6228 return -EFAULT;
6229
6230 return 0;
6231 }
6232
6233 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
6234 char __user *optval, int __user *optlen)
6235 {
6236 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6237 struct sctp_authchunks __user *p = (void __user *)optval;
6238 struct sctp_authchunks val;
6239 struct sctp_association *asoc;
6240 struct sctp_chunks_param *ch;
6241 u32 num_chunks = 0;
6242 char __user *to;
6243
6244 if (!ep->auth_enable)
6245 return -EACCES;
6246
6247 if (len < sizeof(struct sctp_authchunks))
6248 return -EINVAL;
6249
6250 if (copy_from_user(&val, optval, sizeof(val)))
6251 return -EFAULT;
6252
6253 to = p->gauth_chunks;
6254 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
6255 if (!asoc)
6256 return -EINVAL;
6257
6258 ch = asoc->peer.peer_chunks;
6259 if (!ch)
6260 goto num;
6261
6262 /* See if the user provided enough room for all the data */
6263 num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
6264 if (len < num_chunks)
6265 return -EINVAL;
6266
6267 if (copy_to_user(to, ch->chunks, num_chunks))
6268 return -EFAULT;
6269 num:
6270 len = sizeof(struct sctp_authchunks) + num_chunks;
6271 if (put_user(len, optlen))
6272 return -EFAULT;
6273 if (put_user(num_chunks, &p->gauth_number_of_chunks))
6274 return -EFAULT;
6275 return 0;
6276 }
6277
6278 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
6279 char __user *optval, int __user *optlen)
6280 {
6281 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6282 struct sctp_authchunks __user *p = (void __user *)optval;
6283 struct sctp_authchunks val;
6284 struct sctp_association *asoc;
6285 struct sctp_chunks_param *ch;
6286 u32 num_chunks = 0;
6287 char __user *to;
6288
6289 if (!ep->auth_enable)
6290 return -EACCES;
6291
6292 if (len < sizeof(struct sctp_authchunks))
6293 return -EINVAL;
6294
6295 if (copy_from_user(&val, optval, sizeof(val)))
6296 return -EFAULT;
6297
6298 to = p->gauth_chunks;
6299 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
6300 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
6301 return -EINVAL;
6302
6303 if (asoc)
6304 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
6305 else
6306 ch = ep->auth_chunk_list;
6307
6308 if (!ch)
6309 goto num;
6310
6311 num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
6312 if (len < sizeof(struct sctp_authchunks) + num_chunks)
6313 return -EINVAL;
6314
6315 if (copy_to_user(to, ch->chunks, num_chunks))
6316 return -EFAULT;
6317 num:
6318 len = sizeof(struct sctp_authchunks) + num_chunks;
6319 if (put_user(len, optlen))
6320 return -EFAULT;
6321 if (put_user(num_chunks, &p->gauth_number_of_chunks))
6322 return -EFAULT;
6323
6324 return 0;
6325 }
6326
6327 /*
6328 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
6329 * This option gets the current number of associations that are attached
6330  * to a one-to-many style socket.  The option value is a uint32_t.
6331 */
6332 static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
6333 char __user *optval, int __user *optlen)
6334 {
6335 struct sctp_sock *sp = sctp_sk(sk);
6336 struct sctp_association *asoc;
6337 u32 val = 0;
6338
6339 if (sctp_style(sk, TCP))
6340 return -EOPNOTSUPP;
6341
6342 if (len < sizeof(u32))
6343 return -EINVAL;
6344
6345 len = sizeof(u32);
6346
6347 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
6348 val++;
6349 }
6350
6351 if (put_user(len, optlen))
6352 return -EFAULT;
6353 if (copy_to_user(optval, &val, len))
6354 return -EFAULT;
6355
6356 return 0;
6357 }
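/*
 * A minimal userspace sketch (not kernel code; "sd" is illustrative and must
 * be a one-to-many style socket, <netinet/sctp.h> from lksctp-tools is
 * assumed):
 *
 *	#include <stdint.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int assoc_count(int sd)
 *	{
 *		uint32_t n = 0;
 *		socklen_t len = sizeof(n);
 *
 *		if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &n, &len))
 *			return -1;	// e.g. EOPNOTSUPP on a TCP-style socket
 *		return (int)n;
 *	}
 */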
6358
6359 /*
6360 * 8.1.23 SCTP_AUTO_ASCONF
6361 * See the corresponding setsockopt entry as description
6362 */
6363 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
6364 char __user *optval, int __user *optlen)
6365 {
6366 int val = 0;
6367
6368 if (len < sizeof(int))
6369 return -EINVAL;
6370
6371 len = sizeof(int);
6372 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
6373 val = 1;
6374 if (put_user(len, optlen))
6375 return -EFAULT;
6376 if (copy_to_user(optval, &val, len))
6377 return -EFAULT;
6378 return 0;
6379 }
6380
6381 /*
6382 * 8.2.6. Get the Current Identifiers of Associations
6383 * (SCTP_GET_ASSOC_ID_LIST)
6384 *
6385 * This option gets the current list of SCTP association identifiers of
6386 * the SCTP associations handled by a one-to-many style socket.
6387 */
6388 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
6389 char __user *optval, int __user *optlen)
6390 {
6391 struct sctp_sock *sp = sctp_sk(sk);
6392 struct sctp_association *asoc;
6393 struct sctp_assoc_ids *ids;
6394 u32 num = 0;
6395
6396 if (sctp_style(sk, TCP))
6397 return -EOPNOTSUPP;
6398
6399 if (len < sizeof(struct sctp_assoc_ids))
6400 return -EINVAL;
6401
6402 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
6403 num++;
6404 }
6405
6406 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
6407 return -EINVAL;
6408
6409 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
6410
6411 ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
6412 if (unlikely(!ids))
6413 return -ENOMEM;
6414
6415 ids->gaids_number_of_ids = num;
6416 num = 0;
6417 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
6418 ids->gaids_assoc_id[num++] = asoc->assoc_id;
6419 }
6420
6421 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
6422 kfree(ids);
6423 return -EFAULT;
6424 }
6425
6426 kfree(ids);
6427 return 0;
6428 }
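/*
 * A minimal userspace sketch (not kernel code; "sd" is illustrative and must
 * be a one-to-many style socket, the room for 64 ids is an arbitrary choice
 * for this sketch, <netinet/sctp.h> from lksctp-tools is assumed):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static void list_assoc_ids(int sd)
 *	{
 *		union {
 *			struct sctp_assoc_ids ids;
 *			char buf[sizeof(struct sctp_assoc_ids) +
 *				 64 * sizeof(sctp_assoc_t)];
 *		} u;
 *		socklen_t len = sizeof(u);
 *		uint32_t i;
 *
 *		if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST, &u, &len))
 *			return;		// fails with EINVAL if the buffer is too small
 *		for (i = 0; i < u.ids.gaids_number_of_ids; i++)
 *			printf("assoc id %d\n", (int)u.ids.gaids_assoc_id[i]);
 *	}
 */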
6429
6430 /*
6431 * SCTP_PEER_ADDR_THLDS
6432 *
6433  * This option allows us to fetch the potentially failed (PF) threshold for one or all
6434 * transports in an association. See Section 6.1 of:
6435 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
6436 */
6437 static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
6438 char __user *optval,
6439 int len,
6440 int __user *optlen)
6441 {
6442 struct sctp_paddrthlds val;
6443 struct sctp_transport *trans;
6444 struct sctp_association *asoc;
6445
6446 if (len < sizeof(struct sctp_paddrthlds))
6447 return -EINVAL;
6448 len = sizeof(struct sctp_paddrthlds);
6449 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
6450 return -EFAULT;
6451
6452 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
6453 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
6454 if (!asoc)
6455 return -ENOENT;
6456
6457 val.spt_pathpfthld = asoc->pf_retrans;
6458 val.spt_pathmaxrxt = asoc->pathmaxrxt;
6459 } else {
6460 trans = sctp_addr_id2transport(sk, &val.spt_address,
6461 val.spt_assoc_id);
6462 if (!trans)
6463 return -ENOENT;
6464
6465 val.spt_pathmaxrxt = trans->pathmaxrxt;
6466 val.spt_pathpfthld = trans->pf_retrans;
6467 }
6468
6469 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
6470 return -EFAULT;
6471
6472 return 0;
6473 }
6474
6475 /*
6476 * SCTP_GET_ASSOC_STATS
6477 *
6478  * This option retrieves local per-association statistics.  It is modeled
6479  * after the OpenSolaris implementation.
6480 */
6481 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
6482 char __user *optval,
6483 int __user *optlen)
6484 {
6485 struct sctp_assoc_stats sas;
6486 struct sctp_association *asoc = NULL;
6487
6488 /* User must provide at least the assoc id */
6489 if (len < sizeof(sctp_assoc_t))
6490 return -EINVAL;
6491
6492 /* Allow the struct to grow and fill in as much as possible */
6493 len = min_t(size_t, len, sizeof(sas));
6494
6495 if (copy_from_user(&sas, optval, len))
6496 return -EFAULT;
6497
6498 asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
6499 if (!asoc)
6500 return -EINVAL;
6501
6502 sas.sas_rtxchunks = asoc->stats.rtxchunks;
6503 sas.sas_gapcnt = asoc->stats.gapcnt;
6504 sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
6505 sas.sas_osacks = asoc->stats.osacks;
6506 sas.sas_isacks = asoc->stats.isacks;
6507 sas.sas_octrlchunks = asoc->stats.octrlchunks;
6508 sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
6509 sas.sas_oodchunks = asoc->stats.oodchunks;
6510 sas.sas_iodchunks = asoc->stats.iodchunks;
6511 sas.sas_ouodchunks = asoc->stats.ouodchunks;
6512 sas.sas_iuodchunks = asoc->stats.iuodchunks;
6513 sas.sas_idupchunks = asoc->stats.idupchunks;
6514 sas.sas_opackets = asoc->stats.opackets;
6515 sas.sas_ipackets = asoc->stats.ipackets;
6516
6517 	/* Highest max rto observed in this period; 0 is returned if not a
6518 	 * single RTO update took place, in which case obs_rto_ipaddr will
6519 	 * be bogus.
6520 */
6521 sas.sas_maxrto = asoc->stats.max_obs_rto;
6522 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
6523 sizeof(struct sockaddr_storage));
6524
6525 /* Mark beginning of a new observation period */
6526 asoc->stats.max_obs_rto = asoc->rto_min;
6527
6528 if (put_user(len, optlen))
6529 return -EFAULT;
6530
6531 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
6532
6533 if (copy_to_user(optval, &sas, len))
6534 return -EFAULT;
6535
6536 return 0;
6537 }
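/*
 * A minimal userspace sketch (not kernel code; "sd" and "id" are
 * illustrative, and <netinet/sctp.h> from lksctp-tools is assumed to expose
 * struct sctp_assoc_stats, which is part of the kernel UAPI):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static void show_assoc_stats(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_assoc_stats sas;
 *		socklen_t len = sizeof(sas);
 *
 *		memset(&sas, 0, sizeof(sas));
 *		sas.sas_assoc_id = id;		// the association id is mandatory
 *		if (!getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS, &sas, &len))
 *			printf("opackets=%llu ipackets=%llu rtxchunks=%llu\n",
 *			       (unsigned long long)sas.sas_opackets,
 *			       (unsigned long long)sas.sas_ipackets,
 *			       (unsigned long long)sas.sas_rtxchunks);
 *	}
 */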
6538
6539 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
6540 char __user *optval,
6541 int __user *optlen)
6542 {
6543 int val = 0;
6544
6545 if (len < sizeof(int))
6546 return -EINVAL;
6547
6548 len = sizeof(int);
6549 if (sctp_sk(sk)->recvrcvinfo)
6550 val = 1;
6551 if (put_user(len, optlen))
6552 return -EFAULT;
6553 if (copy_to_user(optval, &val, len))
6554 return -EFAULT;
6555
6556 return 0;
6557 }
6558
6559 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
6560 char __user *optval,
6561 int __user *optlen)
6562 {
6563 int val = 0;
6564
6565 if (len < sizeof(int))
6566 return -EINVAL;
6567
6568 len = sizeof(int);
6569 if (sctp_sk(sk)->recvnxtinfo)
6570 val = 1;
6571 if (put_user(len, optlen))
6572 return -EFAULT;
6573 if (copy_to_user(optval, &val, len))
6574 return -EFAULT;
6575
6576 return 0;
6577 }
6578
6579 static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
6580 char __user *optval,
6581 int __user *optlen)
6582 {
6583 struct sctp_assoc_value params;
6584 struct sctp_association *asoc;
6585 int retval = -EFAULT;
6586
6587 if (len < sizeof(params)) {
6588 retval = -EINVAL;
6589 goto out;
6590 }
6591
6592 len = sizeof(params);
6593 if (copy_from_user(&params, optval, len))
6594 goto out;
6595
6596 asoc = sctp_id2assoc(sk, params.assoc_id);
6597 if (asoc) {
6598 params.assoc_value = asoc->prsctp_enable;
6599 } else if (!params.assoc_id) {
6600 struct sctp_sock *sp = sctp_sk(sk);
6601
6602 params.assoc_value = sp->ep->prsctp_enable;
6603 } else {
6604 retval = -EINVAL;
6605 goto out;
6606 }
6607
6608 if (put_user(len, optlen))
6609 goto out;
6610
6611 if (copy_to_user(optval, &params, len))
6612 goto out;
6613
6614 retval = 0;
6615
6616 out:
6617 return retval;
6618 }
6619
6620 static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
6621 char __user *optval,
6622 int __user *optlen)
6623 {
6624 struct sctp_default_prinfo info;
6625 struct sctp_association *asoc;
6626 int retval = -EFAULT;
6627
6628 if (len < sizeof(info)) {
6629 retval = -EINVAL;
6630 goto out;
6631 }
6632
6633 len = sizeof(info);
6634 if (copy_from_user(&info, optval, len))
6635 goto out;
6636
6637 asoc = sctp_id2assoc(sk, info.pr_assoc_id);
6638 if (asoc) {
6639 info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
6640 info.pr_value = asoc->default_timetolive;
6641 } else if (!info.pr_assoc_id) {
6642 struct sctp_sock *sp = sctp_sk(sk);
6643
6644 info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
6645 info.pr_value = sp->default_timetolive;
6646 } else {
6647 retval = -EINVAL;
6648 goto out;
6649 }
6650
6651 if (put_user(len, optlen))
6652 goto out;
6653
6654 if (copy_to_user(optval, &info, len))
6655 goto out;
6656
6657 retval = 0;
6658
6659 out:
6660 return retval;
6661 }
6662
6663 static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
6664 char __user *optval,
6665 int __user *optlen)
6666 {
6667 struct sctp_prstatus params;
6668 struct sctp_association *asoc;
6669 int policy;
6670 int retval = -EINVAL;
6671
6672 if (len < sizeof(params))
6673 goto out;
6674
6675 len = sizeof(params);
6676 if (copy_from_user(&params, optval, len)) {
6677 retval = -EFAULT;
6678 goto out;
6679 }
6680
6681 policy = params.sprstat_policy;
6682 if (policy & ~SCTP_PR_SCTP_MASK)
6683 goto out;
6684
6685 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
6686 if (!asoc)
6687 goto out;
6688
6689 if (policy == SCTP_PR_SCTP_NONE) {
6690 params.sprstat_abandoned_unsent = 0;
6691 params.sprstat_abandoned_sent = 0;
6692 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
6693 params.sprstat_abandoned_unsent +=
6694 asoc->abandoned_unsent[policy];
6695 params.sprstat_abandoned_sent +=
6696 asoc->abandoned_sent[policy];
6697 }
6698 } else {
6699 params.sprstat_abandoned_unsent =
6700 asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)];
6701 params.sprstat_abandoned_sent =
6702 asoc->abandoned_sent[__SCTP_PR_INDEX(policy)];
6703 }
6704
6705 if (put_user(len, optlen)) {
6706 retval = -EFAULT;
6707 goto out;
6708 }
6709
6710 if (copy_to_user(optval, &params, len)) {
6711 retval = -EFAULT;
6712 goto out;
6713 }
6714
6715 retval = 0;
6716
6717 out:
6718 return retval;
6719 }
6720
6721 static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
6722 char __user *optval,
6723 int __user *optlen)
6724 {
6725 struct sctp_stream_out *streamout;
6726 struct sctp_association *asoc;
6727 struct sctp_prstatus params;
6728 int retval = -EINVAL;
6729 int policy;
6730
6731 if (len < sizeof(params))
6732 goto out;
6733
6734 len = sizeof(params);
6735 if (copy_from_user(&params, optval, len)) {
6736 retval = -EFAULT;
6737 goto out;
6738 }
6739
6740 policy = params.sprstat_policy;
6741 if (policy & ~SCTP_PR_SCTP_MASK)
6742 goto out;
6743
6744 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
6745 if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
6746 goto out;
6747
6748 streamout = &asoc->stream.out[params.sprstat_sid];
6749 if (policy == SCTP_PR_SCTP_NONE) {
6750 params.sprstat_abandoned_unsent = 0;
6751 params.sprstat_abandoned_sent = 0;
6752 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
6753 params.sprstat_abandoned_unsent +=
6754 streamout->abandoned_unsent[policy];
6755 params.sprstat_abandoned_sent +=
6756 streamout->abandoned_sent[policy];
6757 }
6758 } else {
6759 params.sprstat_abandoned_unsent =
6760 streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)];
6761 params.sprstat_abandoned_sent =
6762 streamout->abandoned_sent[__SCTP_PR_INDEX(policy)];
6763 }
6764
6765 if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
6766 retval = -EFAULT;
6767 goto out;
6768 }
6769
6770 retval = 0;
6771
6772 out:
6773 return retval;
6774 }
6775
6776 static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
6777 char __user *optval,
6778 int __user *optlen)
6779 {
6780 struct sctp_assoc_value params;
6781 struct sctp_association *asoc;
6782 int retval = -EFAULT;
6783
6784 if (len < sizeof(params)) {
6785 retval = -EINVAL;
6786 goto out;
6787 }
6788
6789 len = sizeof(params);
6790 if (copy_from_user(&params, optval, len))
6791 goto out;
6792
6793 asoc = sctp_id2assoc(sk, params.assoc_id);
6794 if (asoc) {
6795 params.assoc_value = asoc->reconf_enable;
6796 } else if (!params.assoc_id) {
6797 struct sctp_sock *sp = sctp_sk(sk);
6798
6799 params.assoc_value = sp->ep->reconf_enable;
6800 } else {
6801 retval = -EINVAL;
6802 goto out;
6803 }
6804
6805 if (put_user(len, optlen))
6806 goto out;
6807
6808 if (copy_to_user(optval, &params, len))
6809 goto out;
6810
6811 retval = 0;
6812
6813 out:
6814 return retval;
6815 }
6816
6817 static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
6818 char __user *optval,
6819 int __user *optlen)
6820 {
6821 struct sctp_assoc_value params;
6822 struct sctp_association *asoc;
6823 int retval = -EFAULT;
6824
6825 if (len < sizeof(params)) {
6826 retval = -EINVAL;
6827 goto out;
6828 }
6829
6830 len = sizeof(params);
6831 if (copy_from_user(&params, optval, len))
6832 goto out;
6833
6834 asoc = sctp_id2assoc(sk, params.assoc_id);
6835 if (asoc) {
6836 params.assoc_value = asoc->strreset_enable;
6837 } else if (!params.assoc_id) {
6838 struct sctp_sock *sp = sctp_sk(sk);
6839
6840 params.assoc_value = sp->ep->strreset_enable;
6841 } else {
6842 retval = -EINVAL;
6843 goto out;
6844 }
6845
6846 if (put_user(len, optlen))
6847 goto out;
6848
6849 if (copy_to_user(optval, &params, len))
6850 goto out;
6851
6852 retval = 0;
6853
6854 out:
6855 return retval;
6856 }
6857
6858 static int sctp_getsockopt(struct sock *sk, int level, int optname,
6859 char __user *optval, int __user *optlen)
6860 {
6861 int retval = 0;
6862 int len;
6863
6864 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
6865
6866 /* I can hardly begin to describe how wrong this is. This is
6867 * so broken as to be worse than useless. The API draft
6868 * REALLY is NOT helpful here... I am not convinced that the
6869 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
6870 * are at all well-founded.
6871 */
6872 if (level != SOL_SCTP) {
6873 struct sctp_af *af = sctp_sk(sk)->pf->af;
6874
6875 retval = af->getsockopt(sk, level, optname, optval, optlen);
6876 return retval;
6877 }
6878
6879 if (get_user(len, optlen))
6880 return -EFAULT;
6881
6882 if (len < 0)
6883 return -EINVAL;
6884
6885 lock_sock(sk);
6886
6887 switch (optname) {
6888 case SCTP_STATUS:
6889 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
6890 break;
6891 case SCTP_DISABLE_FRAGMENTS:
6892 retval = sctp_getsockopt_disable_fragments(sk, len, optval,
6893 optlen);
6894 break;
6895 case SCTP_EVENTS:
6896 retval = sctp_getsockopt_events(sk, len, optval, optlen);
6897 break;
6898 case SCTP_AUTOCLOSE:
6899 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
6900 break;
6901 case SCTP_SOCKOPT_PEELOFF:
6902 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
6903 break;
6904 case SCTP_SOCKOPT_PEELOFF_FLAGS:
6905 retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
6906 break;
6907 case SCTP_PEER_ADDR_PARAMS:
6908 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
6909 optlen);
6910 break;
6911 case SCTP_DELAYED_SACK:
6912 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
6913 optlen);
6914 break;
6915 case SCTP_INITMSG:
6916 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
6917 break;
6918 case SCTP_GET_PEER_ADDRS:
6919 retval = sctp_getsockopt_peer_addrs(sk, len, optval,
6920 optlen);
6921 break;
6922 case SCTP_GET_LOCAL_ADDRS:
6923 retval = sctp_getsockopt_local_addrs(sk, len, optval,
6924 optlen);
6925 break;
6926 case SCTP_SOCKOPT_CONNECTX3:
6927 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
6928 break;
6929 case SCTP_DEFAULT_SEND_PARAM:
6930 retval = sctp_getsockopt_default_send_param(sk, len,
6931 optval, optlen);
6932 break;
6933 case SCTP_DEFAULT_SNDINFO:
6934 retval = sctp_getsockopt_default_sndinfo(sk, len,
6935 optval, optlen);
6936 break;
6937 case SCTP_PRIMARY_ADDR:
6938 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
6939 break;
6940 case SCTP_NODELAY:
6941 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
6942 break;
6943 case SCTP_RTOINFO:
6944 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
6945 break;
6946 case SCTP_ASSOCINFO:
6947 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
6948 break;
6949 case SCTP_I_WANT_MAPPED_V4_ADDR:
6950 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
6951 break;
6952 case SCTP_MAXSEG:
6953 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
6954 break;
6955 case SCTP_GET_PEER_ADDR_INFO:
6956 retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
6957 optlen);
6958 break;
6959 case SCTP_ADAPTATION_LAYER:
6960 retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
6961 optlen);
6962 break;
6963 case SCTP_CONTEXT:
6964 retval = sctp_getsockopt_context(sk, len, optval, optlen);
6965 break;
6966 case SCTP_FRAGMENT_INTERLEAVE:
6967 retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
6968 optlen);
6969 break;
6970 case SCTP_PARTIAL_DELIVERY_POINT:
6971 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
6972 optlen);
6973 break;
6974 case SCTP_MAX_BURST:
6975 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
6976 break;
6977 case SCTP_AUTH_KEY:
6978 case SCTP_AUTH_CHUNK:
6979 case SCTP_AUTH_DELETE_KEY:
6980 retval = -EOPNOTSUPP;
6981 break;
6982 case SCTP_HMAC_IDENT:
6983 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
6984 break;
6985 case SCTP_AUTH_ACTIVE_KEY:
6986 retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
6987 break;
6988 case SCTP_PEER_AUTH_CHUNKS:
6989 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
6990 optlen);
6991 break;
6992 case SCTP_LOCAL_AUTH_CHUNKS:
6993 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
6994 optlen);
6995 break;
6996 case SCTP_GET_ASSOC_NUMBER:
6997 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
6998 break;
6999 case SCTP_GET_ASSOC_ID_LIST:
7000 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
7001 break;
7002 case SCTP_AUTO_ASCONF:
7003 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
7004 break;
7005 case SCTP_PEER_ADDR_THLDS:
7006 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
7007 break;
7008 case SCTP_GET_ASSOC_STATS:
7009 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
7010 break;
7011 case SCTP_RECVRCVINFO:
7012 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
7013 break;
7014 case SCTP_RECVNXTINFO:
7015 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
7016 break;
7017 case SCTP_PR_SUPPORTED:
7018 retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
7019 break;
7020 case SCTP_DEFAULT_PRINFO:
7021 retval = sctp_getsockopt_default_prinfo(sk, len, optval,
7022 optlen);
7023 break;
7024 case SCTP_PR_ASSOC_STATUS:
7025 retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
7026 optlen);
7027 break;
7028 case SCTP_PR_STREAM_STATUS:
7029 retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
7030 optlen);
7031 break;
7032 case SCTP_RECONFIG_SUPPORTED:
7033 retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
7034 optlen);
7035 break;
7036 case SCTP_ENABLE_STREAM_RESET:
7037 retval = sctp_getsockopt_enable_strreset(sk, len, optval,
7038 optlen);
7039 break;
7040 default:
7041 retval = -ENOPROTOOPT;
7042 break;
7043 }
7044
7045 release_sock(sk);
7046 return retval;
7047 }
7048
7049 static int sctp_hash(struct sock *sk)
7050 {
7051 /* STUB */
7052 return 0;
7053 }
7054
7055 static void sctp_unhash(struct sock *sk)
7056 {
7057 /* STUB */
7058 }
7059
7060 /* Check if port is acceptable. Possibly find first available port.
7061 *
7062  * The port hash table (contained in the 'global' SCTP protocol storage
7063  * returned by struct sctp_protocol *sctp_get_protocol()) is an array
7064  * of 4096 lists (sctp_bind_hashbucket).  The list number is the port
7065  * number hashed out, so, as you would expect from a hash function,
7066  * all the ports in a given list hash out to the same list number.
7067  * Each list therefore holds a set of ports, each with a link to the
7068  * socket (struct sock) that uses it, the port number and
7069  * a fastreuse flag (FIXME: NPI ipg).
7071 */
7072 static struct sctp_bind_bucket *sctp_bucket_create(
7073 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
7074
7075 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
7076 {
7077 struct sctp_bind_hashbucket *head; /* hash list */
7078 struct sctp_bind_bucket *pp;
7079 unsigned short snum;
7080 int ret;
7081
7082 snum = ntohs(addr->v4.sin_port);
7083
7084 pr_debug("%s: begins, snum:%d\n", __func__, snum);
7085
7086 local_bh_disable();
7087
7088 if (snum == 0) {
7089 /* Search for an available port. */
7090 int low, high, remaining, index;
7091 unsigned int rover;
7092 struct net *net = sock_net(sk);
7093
7094 inet_get_local_port_range(net, &low, &high);
7095 remaining = (high - low) + 1;
7096 rover = prandom_u32() % remaining + low;
7097
7098 do {
7099 rover++;
7100 if ((rover < low) || (rover > high))
7101 rover = low;
7102 if (inet_is_local_reserved_port(net, rover))
7103 continue;
7104 index = sctp_phashfn(sock_net(sk), rover);
7105 head = &sctp_port_hashtable[index];
7106 spin_lock(&head->lock);
7107 sctp_for_each_hentry(pp, &head->chain)
7108 if ((pp->port == rover) &&
7109 net_eq(sock_net(sk), pp->net))
7110 goto next;
7111 break;
7112 next:
7113 spin_unlock(&head->lock);
7114 } while (--remaining > 0);
7115
7116 /* Exhausted local port range during search? */
7117 ret = 1;
7118 if (remaining <= 0)
7119 goto fail;
7120
7121 /* OK, here is the one we will use. HEAD (the port
7122 		 * hash table list entry) is non-NULL and we hold its
7123 		 * lock.
7124 */
7125 snum = rover;
7126 } else {
7127 		/* We are given a specific port number; we verify
7128 		 * that it is not being used. If it is used, we will
7129 		 * exhaust the search in the hash list corresponding
7130 * to the port number (snum) - we detect that with the
7131 * port iterator, pp being NULL.
7132 */
7133 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
7134 spin_lock(&head->lock);
7135 sctp_for_each_hentry(pp, &head->chain) {
7136 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
7137 goto pp_found;
7138 }
7139 }
7140 pp = NULL;
7141 goto pp_not_found;
7142 pp_found:
7143 if (!hlist_empty(&pp->owner)) {
7144 /* We had a port hash table hit - there is an
7145 * available port (pp != NULL) and it is being
7146 		 * used by another socket (pp->owner not empty); that other
7147 * socket is going to be sk2.
7148 */
7149 int reuse = sk->sk_reuse;
7150 struct sock *sk2;
7151
7152 pr_debug("%s: found a possible match\n", __func__);
7153
7154 if (pp->fastreuse && sk->sk_reuse &&
7155 sk->sk_state != SCTP_SS_LISTENING)
7156 goto success;
7157
7158 /* Run through the list of sockets bound to the port
7159 * (pp->port) [via the pointers bind_next and
7160 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
7161 * we get the endpoint they describe and run through
7162 * the endpoint's list of IP (v4 or v6) addresses,
7163 * comparing each of the addresses with the address of
7164 * the socket sk. If we find a match, then that means
7165 		 * that this port/socket (sk) combination is already
7166 * in an endpoint.
7167 */
7168 sk_for_each_bound(sk2, &pp->owner) {
7169 struct sctp_endpoint *ep2;
7170 ep2 = sctp_sk(sk2)->ep;
7171
7172 if (sk == sk2 ||
7173 (reuse && sk2->sk_reuse &&
7174 sk2->sk_state != SCTP_SS_LISTENING))
7175 continue;
7176
7177 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
7178 sctp_sk(sk2), sctp_sk(sk))) {
7179 ret = (long)sk2;
7180 goto fail_unlock;
7181 }
7182 }
7183
7184 pr_debug("%s: found a match\n", __func__);
7185 }
7186 pp_not_found:
7187 /* If there was a hash table miss, create a new port. */
7188 ret = 1;
7189 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
7190 goto fail_unlock;
7191
7192 /* In either case (hit or miss), make sure fastreuse is 1 only
7193 * if sk->sk_reuse is too (that is, if the caller requested
7194 * SO_REUSEADDR on this socket -sk-).
7195 */
7196 if (hlist_empty(&pp->owner)) {
7197 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
7198 pp->fastreuse = 1;
7199 else
7200 pp->fastreuse = 0;
7201 } else if (pp->fastreuse &&
7202 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
7203 pp->fastreuse = 0;
7204
7205 /* We are set, so fill up all the data in the hash table
7206 	 * entry, and tie the socket list information with the rest of the
7207 	 * sockets.  FIXME: Blurry, NPI (ipg).
7208 */
7209 success:
7210 if (!sctp_sk(sk)->bind_hash) {
7211 inet_sk(sk)->inet_num = snum;
7212 sk_add_bind_node(sk, &pp->owner);
7213 sctp_sk(sk)->bind_hash = pp;
7214 }
7215 ret = 0;
7216
7217 fail_unlock:
7218 spin_unlock(&head->lock);
7219
7220 fail:
7221 local_bh_enable();
7222 return ret;
7223 }
7224
7225 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
7226 * port is requested.
7227 */
7228 static int sctp_get_port(struct sock *sk, unsigned short snum)
7229 {
7230 union sctp_addr addr;
7231 struct sctp_af *af = sctp_sk(sk)->pf->af;
7232
7233 /* Set up a dummy address struct from the sk. */
7234 af->from_sk(&addr, sk);
7235 addr.v4.sin_port = htons(snum);
7236
7237 /* Note: sk->sk_num gets filled in if ephemeral port request. */
7238 return !!sctp_get_port_local(sk, &addr);
7239 }
7240
7241 /*
7242 * Move a socket to LISTENING state.
7243 */
7244 static int sctp_listen_start(struct sock *sk, int backlog)
7245 {
7246 struct sctp_sock *sp = sctp_sk(sk);
7247 struct sctp_endpoint *ep = sp->ep;
7248 struct crypto_shash *tfm = NULL;
7249 char alg[32];
7250
7251 /* Allocate HMAC for generating cookie. */
7252 if (!sp->hmac && sp->sctp_hmac_alg) {
7253 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
7254 tfm = crypto_alloc_shash(alg, 0, 0);
7255 if (IS_ERR(tfm)) {
7256 net_info_ratelimited("failed to load transform for %s: %ld\n",
7257 sp->sctp_hmac_alg, PTR_ERR(tfm));
7258 return -ENOSYS;
7259 }
7260 sctp_sk(sk)->hmac = tfm;
7261 }
7262
7263 /*
7264 * If a bind() or sctp_bindx() is not called prior to a listen()
7265 * call that allows new associations to be accepted, the system
7266 * picks an ephemeral port and will choose an address set equivalent
7267 * to binding with a wildcard address.
7268 *
7269 * This is not currently spelled out in the SCTP sockets
7270 * extensions draft, but follows the practice as seen in TCP
7271 * sockets.
7272 *
7273 */
7274 sk->sk_state = SCTP_SS_LISTENING;
7275 if (!ep->base.bind_addr.port) {
7276 if (sctp_autobind(sk))
7277 return -EAGAIN;
7278 } else {
7279 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
7280 sk->sk_state = SCTP_SS_CLOSED;
7281 return -EADDRINUSE;
7282 }
7283 }
7284
7285 sk->sk_max_ack_backlog = backlog;
7286 sctp_hash_endpoint(ep);
7287 return 0;
7288 }
7289
7290 /*
7291 * 4.1.3 / 5.1.3 listen()
7292 *
7293 * By default, new associations are not accepted for UDP style sockets.
7294 * An application uses listen() to mark a socket as being able to
7295 * accept new associations.
7296 *
7297 * On TCP style sockets, applications use listen() to ready the SCTP
7298 * endpoint for accepting inbound associations.
7299 *
7300 * On both types of endpoints a backlog of '0' disables listening.
7301 *
7302 * Move a socket to LISTENING state.
7303 */
7304 int sctp_inet_listen(struct socket *sock, int backlog)
7305 {
7306 struct sock *sk = sock->sk;
7307 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
7308 int err = -EINVAL;
7309
7310 if (unlikely(backlog < 0))
7311 return err;
7312
7313 lock_sock(sk);
7314
7315 /* Peeled-off sockets are not allowed to listen(). */
7316 if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
7317 goto out;
7318
7319 if (sock->state != SS_UNCONNECTED)
7320 goto out;
7321
7322 if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
7323 goto out;
7324
7325 /* If backlog is zero, disable listening. */
7326 if (!backlog) {
7327 if (sctp_sstate(sk, CLOSED))
7328 goto out;
7329
7330 err = 0;
7331 sctp_unhash_endpoint(ep);
7332 sk->sk_state = SCTP_SS_CLOSED;
7333 if (sk->sk_reuse)
7334 sctp_sk(sk)->bind_hash->fastreuse = 1;
7335 goto out;
7336 }
7337
7338 /* If we are already listening, just update the backlog */
7339 if (sctp_sstate(sk, LISTENING))
7340 sk->sk_max_ack_backlog = backlog;
7341 else {
7342 err = sctp_listen_start(sk, backlog);
7343 if (err)
7344 goto out;
7345 }
7346
7347 err = 0;
7348 out:
7349 release_sock(sk);
7350 return err;
7351 }
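/*
 * A minimal userspace sketch of readying a one-to-many endpoint for new
 * associations (not kernel code; the port and backlog values are
 * illustrative, <netinet/sctp.h> from lksctp-tools is assumed):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int sctp_server_fd(unsigned short port)
 *	{
 *		struct sockaddr_in sin;
 *		int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *
 *		if (sd < 0)
 *			return -1;
 *		memset(&sin, 0, sizeof(sin));
 *		sin.sin_family = AF_INET;
 *		sin.sin_port = htons(port);
 *		sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *		if (bind(sd, (struct sockaddr *)&sin, sizeof(sin)) ||
 *		    listen(sd, 5)) {	// a backlog of 0 would disable listening
 *			close(sd);
 *			return -1;
 *		}
 *		return sd;
 *	}
 */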
7352
7353 /*
7354  * This function is modeled on the current datagram_poll() and
7355  * tcp_poll().  Note that, based on these implementations, we don't
7356 * lock the socket in this function, even though it seems that,
7357 * ideally, locking or some other mechanisms can be used to ensure
7358 * the integrity of the counters (sndbuf and wmem_alloc) used
7359 * in this place. We assume that we don't need locks either until proven
7360 * otherwise.
7361 *
7362 * Another thing to note is that we include the Async I/O support
7363 * here, again, by modeling the current TCP/UDP code. We don't have
7364 * a good way to test with it yet.
7365 */
7366 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
7367 {
7368 struct sock *sk = sock->sk;
7369 struct sctp_sock *sp = sctp_sk(sk);
7370 unsigned int mask;
7371
7372 poll_wait(file, sk_sleep(sk), wait);
7373
7374 sock_rps_record_flow(sk);
7375
7376 /* A TCP-style listening socket becomes readable when the accept queue
7377 * is not empty.
7378 */
7379 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
7380 return (!list_empty(&sp->ep->asocs)) ?
7381 (POLLIN | POLLRDNORM) : 0;
7382
7383 mask = 0;
7384
7385 	/* Are there any exceptional events?  */
7386 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
7387 mask |= POLLERR |
7388 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
7389 if (sk->sk_shutdown & RCV_SHUTDOWN)
7390 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
7391 if (sk->sk_shutdown == SHUTDOWN_MASK)
7392 mask |= POLLHUP;
7393
7394 /* Is it readable? Reconsider this code with TCP-style support. */
7395 if (!skb_queue_empty(&sk->sk_receive_queue))
7396 mask |= POLLIN | POLLRDNORM;
7397
7398 /* The association is either gone or not ready. */
7399 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
7400 return mask;
7401
7402 /* Is it writable? */
7403 if (sctp_writeable(sk)) {
7404 mask |= POLLOUT | POLLWRNORM;
7405 } else {
7406 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
7407 /*
7408 * Since the socket is not locked, buffer space might
7409 * become available after the writeable check but before
7410 * the bit is set.  That window could cause a lost I/O
7411 * signal.  tcp_poll() has a race breaker for this
7412 * condition; based on its implementation, we add the
7413 * following re-check to cover it here as well.
7414 */
7415 if (sctp_writeable(sk))
7416 mask |= POLLOUT | POLLWRNORM;
7417 }
7418 return mask;
7419 }
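
/* Editorial example (not part of this file): a small userspace poll() call
 * matching the readiness rules implemented in sctp_poll() above -- POLLIN
 * for queued data (or a non-empty accept queue on a TCP-style listener),
 * POLLOUT while send buffer space is available.  Function name and the
 * one-second timeout are illustrative only.
 */
#if 0
#include <poll.h>
#include <stdio.h>

static void wait_for_sctp_events(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

	if (poll(&pfd, 1, 1000 /* ms */) > 0) {
		if (pfd.revents & POLLIN)
			printf("data or a new association is ready\n");
		if (pfd.revents & POLLOUT)
			printf("send buffer space is available\n");
	}
}
#endif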
7420
7421 /********************************************************************
7422 * 2nd Level Abstractions
7423 ********************************************************************/
7424
7425 static struct sctp_bind_bucket *sctp_bucket_create(
7426 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
7427 {
7428 struct sctp_bind_bucket *pp;
7429
7430 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
7431 if (pp) {
7432 SCTP_DBG_OBJCNT_INC(bind_bucket);
7433 pp->port = snum;
7434 pp->fastreuse = 0;
7435 INIT_HLIST_HEAD(&pp->owner);
7436 pp->net = net;
7437 hlist_add_head(&pp->node, &head->chain);
7438 }
7439 return pp;
7440 }
7441
7442 /* Caller must hold hashbucket lock for this tb with local BH disabled */
7443 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
7444 {
7445 if (pp && hlist_empty(&pp->owner)) {
7446 __hlist_del(&pp->node);
7447 kmem_cache_free(sctp_bucket_cachep, pp);
7448 SCTP_DBG_OBJCNT_DEC(bind_bucket);
7449 }
7450 }
7451
7452 /* Release this socket's reference to a local port. */
7453 static inline void __sctp_put_port(struct sock *sk)
7454 {
7455 struct sctp_bind_hashbucket *head =
7456 &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
7457 inet_sk(sk)->inet_num)];
7458 struct sctp_bind_bucket *pp;
7459
7460 spin_lock(&head->lock);
7461 pp = sctp_sk(sk)->bind_hash;
7462 __sk_del_bind_node(sk);
7463 sctp_sk(sk)->bind_hash = NULL;
7464 inet_sk(sk)->inet_num = 0;
7465 sctp_bucket_destroy(pp);
7466 spin_unlock(&head->lock);
7467 }
7468
7469 void sctp_put_port(struct sock *sk)
7470 {
7471 local_bh_disable();
7472 __sctp_put_port(sk);
7473 local_bh_enable();
7474 }
7475
7476 /*
7477 * The system picks an ephemeral port and chooses an address set equivalent
7478 * to binding with a wildcard address.
7479 * One of those addresses will be the primary address for the association.
7480 * This automatically enables the multihoming capability of SCTP.
7481 */
7482 static int sctp_autobind(struct sock *sk)
7483 {
7484 union sctp_addr autoaddr;
7485 struct sctp_af *af;
7486 __be16 port;
7487
7488 /* Initialize a local sockaddr structure to INADDR_ANY. */
7489 af = sctp_sk(sk)->pf->af;
7490
7491 port = htons(inet_sk(sk)->inet_num);
7492 af->inaddr_any(&autoaddr, port);
7493
7494 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
7495 }
7496
7497 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
7498 *
7499 * From RFC 2292
7500 * 4.2 The cmsghdr Structure
7501 *
7502 * When ancillary data is sent or received, any number of ancillary data
7503 * objects can be specified by the msg_control and msg_controllen members of
7504 * the msghdr structure, because each object is preceded by
7505 * a cmsghdr structure defining the object's length (the cmsg_len member).
7506 * Historically Berkeley-derived implementations have passed only one object
7507 * at a time, but this API allows multiple objects to be
7508 * passed in a single call to sendmsg() or recvmsg(). The following example
7509 * shows two ancillary data objects in a control buffer.
7510 *
7511 * |<--------------------------- msg_controllen -------------------------->|
7512 * |                                                                       |
7513 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
7514 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
7515 * |                                   |                                   |
7516 * |<---------- cmsg_len ---------->|  |<--------- cmsg_len ----------->|  |
7517 * |<--------- CMSG_LEN() --------->|  |<-------- CMSG_LEN() ---------->|  |
7518 * |                                |  |                                |  |
7519 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
7520 * |cmsg_|cmsg_|cmsg_|XX|           |XX|cmsg_|cmsg_|cmsg_|XX|           |XX|
7521 * |len  |level|type |XX|cmsg_data[]|XX|len  |level|type |XX|cmsg_data[]|XX|
7522 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
7523 * ^
7524 * |
7525 * msg_control
7526 * points here
7535 */
7536 static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs)
7537 {
7538 struct msghdr *my_msg = (struct msghdr *)msg;
7539 struct cmsghdr *cmsg;
7540
7541 for_each_cmsghdr(cmsg, my_msg) {
7542 if (!CMSG_OK(my_msg, cmsg))
7543 return -EINVAL;
7544
7545 /* Should we parse this header or ignore? */
7546 if (cmsg->cmsg_level != IPPROTO_SCTP)
7547 continue;
7548
7549 /* Strictly check lengths following example in SCM code. */
7550 switch (cmsg->cmsg_type) {
7551 case SCTP_INIT:
7552 /* SCTP Socket API Extension
7553 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
7554 *
7555 * This cmsghdr structure provides information for
7556 * initializing new SCTP associations with sendmsg().
7557 * The SCTP_INITMSG socket option uses this same data
7558 * structure. This structure is not used for
7559 * recvmsg().
7560 *
7561 * cmsg_level cmsg_type cmsg_data[]
7562 * ------------ ------------ ----------------------
7563 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
7564 */
7565 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
7566 return -EINVAL;
7567
7568 cmsgs->init = CMSG_DATA(cmsg);
7569 break;
7570
7571 case SCTP_SNDRCV:
7572 /* SCTP Socket API Extension
7573 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
7574 *
7575 * This cmsghdr structure specifies SCTP options for
7576 * sendmsg() and describes SCTP header information
7577 * about a received message through recvmsg().
7578 *
7579 * cmsg_level cmsg_type cmsg_data[]
7580 * ------------ ------------ ----------------------
7581 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
7582 */
7583 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
7584 return -EINVAL;
7585
7586 cmsgs->srinfo = CMSG_DATA(cmsg);
7587
7588 if (cmsgs->srinfo->sinfo_flags &
7589 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
7590 SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
7591 SCTP_ABORT | SCTP_EOF))
7592 return -EINVAL;
7593 break;
7594
7595 case SCTP_SNDINFO:
7596 /* SCTP Socket API Extension
7597 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
7598 *
7599 * This cmsghdr structure specifies SCTP options for
7600 * sendmsg().  This structure and SCTP_RCVINFO replace
7601 * SCTP_SNDRCV, which has been deprecated.
7602 *
7603 * cmsg_level cmsg_type cmsg_data[]
7604 * ------------ ------------ ---------------------
7605 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
7606 */
7607 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
7608 return -EINVAL;
7609
7610 cmsgs->sinfo = CMSG_DATA(cmsg);
7611
7612 if (cmsgs->sinfo->snd_flags &
7613 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
7614 SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
7615 SCTP_ABORT | SCTP_EOF))
7616 return -EINVAL;
7617 break;
7618 default:
7619 return -EINVAL;
7620 }
7621 }
7622
7623 return 0;
7624 }
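
/* Editorial example (not part of this file): building the ancillary data
 * layout shown above from userspace -- a single SCTP_SNDINFO object
 * attached to a sendmsg() call, sized with the CMSG_SPACE()/CMSG_LEN()
 * macros exactly as sctp_msghdr_parse() expects.  struct sctp_sndinfo and
 * SCTP_SNDINFO come from the uapi header (linux/sctp.h here; lksctp-tools
 * exposes the same via netinet/sctp.h).  The helper name is hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/sctp.h>

static ssize_t send_on_stream(int fd, const void *buf, size_t len,
			      uint16_t stream)
{
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	struct sctp_sndinfo *sinfo;

	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type	 = SCTP_SNDINFO;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(struct sctp_sndinfo));

	sinfo = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
	memset(sinfo, 0, sizeof(*sinfo));
	sinfo->snd_sid = stream;	/* stream number for this message */

	return sendmsg(fd, &msg, 0);
}
#endif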
7625
7626 /*
7627 * Wait for a packet..
7628 * Note: This function is the same function as in core/datagram.c
7629 * with a few modifications to make lksctp work.
7630 */
7631 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
7632 {
7633 int error;
7634 DEFINE_WAIT(wait);
7635
7636 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
7637
7638 /* Socket errors? */
7639 error = sock_error(sk);
7640 if (error)
7641 goto out;
7642
7643 if (!skb_queue_empty(&sk->sk_receive_queue))
7644 goto ready;
7645
7646 /* Socket shut down? */
7647 if (sk->sk_shutdown & RCV_SHUTDOWN)
7648 goto out;
7649
7650 /* Sequenced packets can come disconnected. If so we report the
7651 * problem.
7652 */
7653 error = -ENOTCONN;
7654
7655 /* Is there a good reason to think that we may receive some data? */
7656 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
7657 goto out;
7658
7659 /* Handle signals. */
7660 if (signal_pending(current))
7661 goto interrupted;
7662
7663 /* Let another process have a go, since we are going to sleep
7664 * anyway.  Note: this may cause odd behaviors if the message
7665 * does not fit in the user's buffer, but this seems to be the
7666 * only way to honor MSG_DONTWAIT realistically.
7667 */
7668 release_sock(sk);
7669 *timeo_p = schedule_timeout(*timeo_p);
7670 lock_sock(sk);
7671
7672 ready:
7673 finish_wait(sk_sleep(sk), &wait);
7674 return 0;
7675
7676 interrupted:
7677 error = sock_intr_errno(*timeo_p);
7678
7679 out:
7680 finish_wait(sk_sleep(sk), &wait);
7681 *err = error;
7682 return error;
7683 }
7684
7685 /* Receive a datagram.
7686 * Note: This is pretty much the same routine as in core/datagram.c
7687 * with a few changes to make lksctp work.
7688 */
7689 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
7690 int noblock, int *err)
7691 {
7692 int error;
7693 struct sk_buff *skb;
7694 long timeo;
7695
7696 timeo = sock_rcvtimeo(sk, noblock);
7697
7698 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
7699 MAX_SCHEDULE_TIMEOUT);
7700
7701 do {
7702 /* Again, only user-level code calls this function, so
7703 * nothing running at interrupt level will suddenly eat
7704 * the receive_queue.
7705 *
7706 * Look at the current NFS client, by the way...
7707 * However, this function was correct in any case. 8)
7708 */
7709 if (flags & MSG_PEEK) {
7710 skb = skb_peek(&sk->sk_receive_queue);
7711 if (skb)
7712 refcount_inc(&skb->users);
7713 } else {
7714 skb = __skb_dequeue(&sk->sk_receive_queue);
7715 }
7716
7717 if (skb)
7718 return skb;
7719
7720 /* Caller is allowed not to check sk->sk_err before calling. */
7721 error = sock_error(sk);
7722 if (error)
7723 goto no_packet;
7724
7725 if (sk->sk_shutdown & RCV_SHUTDOWN)
7726 break;
7727
7728 if (sk_can_busy_loop(sk)) {
7729 sk_busy_loop(sk, noblock);
7730
7731 if (!skb_queue_empty(&sk->sk_receive_queue))
7732 continue;
7733 }
7734
7735 /* User doesn't want to wait. */
7736 error = -EAGAIN;
7737 if (!timeo)
7738 goto no_packet;
7739 } while (sctp_wait_for_packet(sk, err, &timeo) == 0);
7740
7741 return NULL;
7742
7743 no_packet:
7744 *err = error;
7745 return NULL;
7746 }
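
/* Editorial example (not part of this file): the MSG_PEEK / MSG_DONTWAIT
 * handling above as seen from userspace.  Peeking leaves the message on the
 * receive queue, and MSG_DONTWAIT turns an empty queue into EAGAIN instead
 * of sleeping in sctp_wait_for_packet().  Function name is illustrative.
 */
#if 0
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t peek_then_read(int fd, void *buf, size_t len)
{
	ssize_t n = recv(fd, buf, len, MSG_PEEK | MSG_DONTWAIT);

	if (n < 0 && errno == EAGAIN)
		return 0;	/* nothing queued yet */
	if (n < 0)
		return n;

	/* The peeked message is still queued; now consume it for real. */
	return recv(fd, buf, len, 0);
}
#endif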
7747
7748 /* If sndbuf has changed, wake up per association sndbuf waiters. */
7749 static void __sctp_write_space(struct sctp_association *asoc)
7750 {
7751 struct sock *sk = asoc->base.sk;
7752
7753 if (sctp_wspace(asoc) <= 0)
7754 return;
7755
7756 if (waitqueue_active(&asoc->wait))
7757 wake_up_interruptible(&asoc->wait);
7758
7759 if (sctp_writeable(sk)) {
7760 struct socket_wq *wq;
7761
7762 rcu_read_lock();
7763 wq = rcu_dereference(sk->sk_wq);
7764 if (wq) {
7765 if (waitqueue_active(&wq->wait))
7766 wake_up_interruptible(&wq->wait);
7767
7768 /* Note that we try to include the Async I/O support
7769 * here, again modeled on the current TCP/UDP code.
7770 * We have not tested it yet.
7771 */
7772 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
7773 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
7774 }
7775 rcu_read_unlock();
7776 }
7777 }
7778
7779 static void sctp_wake_up_waiters(struct sock *sk,
7780 struct sctp_association *asoc)
7781 {
7782 struct sctp_association *tmp = asoc;
7783
7784 /* We do accounting for the sndbuf space per association,
7785 * so we only need to wake our own association.
7786 */
7787 if (asoc->ep->sndbuf_policy)
7788 return __sctp_write_space(asoc);
7789
7790 /* If the association is going down and is just flushing its
7791 * outq, then simply notify the others in the usual way.
7792 */
7793 if (asoc->base.dead)
7794 return sctp_write_space(sk);
7795
7796 /* Accounting for the sndbuf space is per socket, so we
7797 * need to wake up the other associations too.  To be fair,
7798 * let them have a go first instead of just doing a
7799 * sctp_write_space() call on this socket.
7800 *
7801 * Note that we reach sctp_wake_up_waiters() only when
7802 * associations free up queued chunks, thus we are under
7803 * lock and the list of associations on a socket is
7804 * guaranteed not to change.
7805 */
7806 for (tmp = list_next_entry(tmp, asocs); 1;
7807 tmp = list_next_entry(tmp, asocs)) {
7808 /* Manually skip the head element. */
7809 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
7810 continue;
7811 /* Wake up association. */
7812 __sctp_write_space(tmp);
7813 /* We've reached the end. */
7814 if (tmp == asoc)
7815 break;
7816 }
7817 }
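
/* Editorial note: ep->sndbuf_policy mirrors the net.sctp.sndbuf_policy
 * sysctl.  A non-zero value accounts send-buffer space per association, so
 * only the owning association needs waking above; zero shares sk_sndbuf
 * across the whole socket, which is why the fair round-robin walk over the
 * endpoint's association list is needed.
 */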
7818
7819 /* Do accounting for the sndbuf space.
7820 * Decrement the used sndbuf space of the corresponding association by the
7821 * data size which was just transmitted (freed).
7822 */
7823 static void sctp_wfree(struct sk_buff *skb)
7824 {
7825 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
7826 struct sctp_association *asoc = chunk->asoc;
7827 struct sock *sk = asoc->base.sk;
7828
7829 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
7830 sizeof(struct sk_buff) +
7831 sizeof(struct sctp_chunk);
7832
7833 WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
7834
7835 /*
7836 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
7837 */
7838 sk->sk_wmem_queued -= skb->truesize;
7839 sk_mem_uncharge(sk, skb->truesize);
7840
7841 sock_wfree(skb);
7842 sctp_wake_up_waiters(sk, asoc);
7843
7844 sctp_association_put(asoc);
7845 }
7846
7847 /* Do accounting for the receive space on the socket.
7848 * Accounting for the association is done in ulpevent.c
7849 * We set this as a destructor for the cloned data skbs so that
7850 * accounting is done at the correct time.
7851 */
7852 void sctp_sock_rfree(struct sk_buff *skb)
7853 {
7854 struct sock *sk = skb->sk;
7855 struct sctp_ulpevent *event = sctp_skb2event(skb);
7856
7857 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
7858
7859 /*
7860 * Mimic the behavior of sock_rfree
7861 */
7862 sk_mem_uncharge(sk, event->rmem_len);
7863 }
7864
7865
7866 /* Helper function to wait for space in the sndbuf. */
7867 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
7868 size_t msg_len)
7869 {
7870 struct sock *sk = asoc->base.sk;
7871 long current_timeo = *timeo_p;
7872 DEFINE_WAIT(wait);
7873 int err = 0;
7874
7875 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
7876 *timeo_p, msg_len);
7877
7878 /* Increment the association's refcnt. */
7879 sctp_association_hold(asoc);
7880
7881 /* Wait on the association specific sndbuf space. */
7882 for (;;) {
7883 prepare_to_wait_exclusive(&asoc->wait, &wait,
7884 TASK_INTERRUPTIBLE);
7885 if (asoc->base.dead)
7886 goto do_dead;
7887 if (!*timeo_p)
7888 goto do_nonblock;
7889 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
7890 goto do_error;
7891 if (signal_pending(current))
7892 goto do_interrupted;
7893 if (msg_len <= sctp_wspace(asoc))
7894 break;
7895
7896 /* Let another process have a go, since we are going
7897 * to sleep anyway.
7898 */
7899 release_sock(sk);
7900 current_timeo = schedule_timeout(current_timeo);
7901 lock_sock(sk);
7902 if (sk != asoc->base.sk)
7903 goto do_error;
7904
7905 *timeo_p = current_timeo;
7906 }
7907
7908 out:
7909 finish_wait(&asoc->wait, &wait);
7910
7911 /* Release the association's refcnt. */
7912 sctp_association_put(asoc);
7913
7914 return err;
7915
7916 do_dead:
7917 err = -ESRCH;
7918 goto out;
7919
7920 do_error:
7921 err = -EPIPE;
7922 goto out;
7923
7924 do_interrupted:
7925 err = sock_intr_errno(*timeo_p);
7926 goto out;
7927
7928 do_nonblock:
7929 err = -EAGAIN;
7930 goto out;
7931 }
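
/* Editorial example (not part of this file): bounding the wait above from
 * userspace.  With SO_SNDTIMEO set, a send that would otherwise block in
 * sctp_wait_for_sndbuf() fails with EAGAIN once the (here two-second,
 * purely illustrative) timeout expires.
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>

static int bound_send_blocking(int fd)
{
	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };

	return setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
}
#endif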
7932
7933 void sctp_data_ready(struct sock *sk)
7934 {
7935 struct socket_wq *wq;
7936
7937 rcu_read_lock();
7938 wq = rcu_dereference(sk->sk_wq);
7939 if (skwq_has_sleeper(wq))
7940 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
7941 POLLRDNORM | POLLRDBAND);
7942 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
7943 rcu_read_unlock();
7944 }
7945
7946 /* If socket sndbuf has changed, wake up all per association waiters. */
7947 void sctp_write_space(struct sock *sk)
7948 {
7949 struct sctp_association *asoc;
7950
7951 /* Wake up the tasks in each wait queue. */
7952 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
7953 __sctp_write_space(asoc);
7954 }
7955 }
7956
7957 /* Is there any sndbuf space available on the socket?
7958 *
7959 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
7960 * associations on the same socket. For a UDP-style socket with
7961 * multiple associations, it is possible for it to be "unwriteable"
7962 * prematurely. I assume that this is acceptable because
7963 * a premature "unwriteable" is better than an accidental "writeable" which
7964 * would cause an unwanted block under certain circumstances. For the 1-1
7965 * UDP-style sockets or TCP-style sockets, this code should work.
7966 * - Daisy
7967 */
7968 static int sctp_writeable(struct sock *sk)
7969 {
7970 int amt = 0;
7971
7972 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
7973 if (amt < 0)
7974 amt = 0;
7975 return amt;
7976 }
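
/* Editorial note: as a worked (illustrative) figure, with sk_sndbuf at
 * 212992 bytes and 200000 bytes of write memory still allocated across all
 * associations, sctp_writeable() reports 12992 bytes of headroom; once the
 * allocation reaches or exceeds sk_sndbuf it reports 0 and sctp_poll()
 * stops advertising POLLOUT.
 */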
7977
7978 /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
7979 * returns immediately with EINPROGRESS.
7980 */
7981 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
7982 {
7983 struct sock *sk = asoc->base.sk;
7984 int err = 0;
7985 long current_timeo = *timeo_p;
7986 DEFINE_WAIT(wait);
7987
7988 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
7989
7990 /* Increment the association's refcnt. */
7991 sctp_association_hold(asoc);
7992
7993 for (;;) {
7994 prepare_to_wait_exclusive(&asoc->wait, &wait,
7995 TASK_INTERRUPTIBLE);
7996 if (!*timeo_p)
7997 goto do_nonblock;
7998 if (sk->sk_shutdown & RCV_SHUTDOWN)
7999 break;
8000 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
8001 asoc->base.dead)
8002 goto do_error;
8003 if (signal_pending(current))
8004 goto do_interrupted;
8005
8006 if (sctp_state(asoc, ESTABLISHED))
8007 break;
8008
8009 /* Let another process have a go, since we are going
8010 * to sleep anyway.
8011 */
8012 release_sock(sk);
8013 current_timeo = schedule_timeout(current_timeo);
8014 lock_sock(sk);
8015
8016 *timeo_p = current_timeo;
8017 }
8018
8019 out:
8020 finish_wait(&asoc->wait, &wait);
8021
8022 /* Release the association's refcnt. */
8023 sctp_association_put(asoc);
8024
8025 return err;
8026
8027 do_error:
8028 if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
8029 err = -ETIMEDOUT;
8030 else
8031 err = -ECONNREFUSED;
8032 goto out;
8033
8034 do_interrupted:
8035 err = sock_intr_errno(*timeo_p);
8036 goto out;
8037
8038 do_nonblock:
8039 err = -EINPROGRESS;
8040 goto out;
8041 }
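
/* Editorial example (not part of this file): the EINPROGRESS path above as
 * seen from userspace.  connect() on a non-blocking one-to-one SCTP socket
 * returns immediately with EINPROGRESS while the association handshake
 * continues; this hypothetical helper only starts the connect and reports
 * which case occurred.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>

/* Returns 1 if the association is already up, 0 if the handshake is still
 * in progress, -1 on error.
 */
static int start_sctp_connect(int fd, const struct sockaddr *sa,
			      socklen_t salen)
{
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

	if (connect(fd, sa, salen) == 0)
		return 1;

	return errno == EINPROGRESS ? 0 : -1;
}
#endif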
8042
8043 static int sctp_wait_for_accept(struct sock *sk, long timeo)
8044 {
8045 struct sctp_endpoint *ep;
8046 int err = 0;
8047 DEFINE_WAIT(wait);
8048
8049 ep = sctp_sk(sk)->ep;
8050
8051
8052 for (;;) {
8053 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
8054 TASK_INTERRUPTIBLE);
8055
8056 if (list_empty(&ep->asocs)) {
8057 release_sock(sk);
8058 timeo = schedule_timeout(timeo);
8059 lock_sock(sk);
8060 }
8061
8062 err = -EINVAL;
8063 if (!sctp_sstate(sk, LISTENING))
8064 break;
8065
8066 err = 0;
8067 if (!list_empty(&ep->asocs))
8068 break;
8069
8070 err = sock_intr_errno(timeo);
8071 if (signal_pending(current))
8072 break;
8073
8074 err = -EAGAIN;
8075 if (!timeo)
8076 break;
8077 }
8078
8079 finish_wait(sk_sleep(sk), &wait);
8080
8081 return err;
8082 }
8083
8084 static void sctp_wait_for_close(struct sock *sk, long timeout)
8085 {
8086 DEFINE_WAIT(wait);
8087
8088 do {
8089 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
8090 if (list_empty(&sctp_sk(sk)->ep->asocs))
8091 break;
8092 release_sock(sk);
8093 timeout = schedule_timeout(timeout);
8094 lock_sock(sk);
8095 } while (!signal_pending(current) && timeout);
8096
8097 finish_wait(sk_sleep(sk), &wait);
8098 }
8099
8100 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
8101 {
8102 struct sk_buff *frag;
8103
8104 if (!skb->data_len)
8105 goto done;
8106
8107 /* Don't forget the fragments. */
8108 skb_walk_frags(skb, frag)
8109 sctp_skb_set_owner_r_frag(frag, sk);
8110
8111 done:
8112 sctp_skb_set_owner_r(skb, sk);
8113 }
8114
8115 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
8116 struct sctp_association *asoc)
8117 {
8118 struct inet_sock *inet = inet_sk(sk);
8119 struct inet_sock *newinet;
8120
8121 newsk->sk_type = sk->sk_type;
8122 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
8123 newsk->sk_flags = sk->sk_flags;
8124 newsk->sk_tsflags = sk->sk_tsflags;
8125 newsk->sk_no_check_tx = sk->sk_no_check_tx;
8126 newsk->sk_no_check_rx = sk->sk_no_check_rx;
8127 newsk->sk_reuse = sk->sk_reuse;
8128
8129 newsk->sk_shutdown = sk->sk_shutdown;
8130 newsk->sk_destruct = sctp_destruct_sock;
8131 newsk->sk_family = sk->sk_family;
8132 newsk->sk_protocol = IPPROTO_SCTP;
8133 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
8134 newsk->sk_sndbuf = sk->sk_sndbuf;
8135 newsk->sk_rcvbuf = sk->sk_rcvbuf;
8136 newsk->sk_lingertime = sk->sk_lingertime;
8137 newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
8138 newsk->sk_sndtimeo = sk->sk_sndtimeo;
8139 newsk->sk_rxhash = sk->sk_rxhash;
8140
8141 newinet = inet_sk(newsk);
8142
8143 /* Initialize sk's sport, dport, rcv_saddr and daddr for
8144 * getsockname() and getpeername()
8145 */
8146 newinet->inet_sport = inet->inet_sport;
8147 newinet->inet_saddr = inet->inet_saddr;
8148 newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
8149 newinet->inet_dport = htons(asoc->peer.port);
8150 newinet->pmtudisc = inet->pmtudisc;
8151 newinet->inet_id = asoc->next_tsn ^ jiffies;
8152
8153 newinet->uc_ttl = inet->uc_ttl;
8154 newinet->mc_loop = 1;
8155 newinet->mc_ttl = 1;
8156 newinet->mc_index = 0;
8157 newinet->mc_list = NULL;
8158
8159 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
8160 net_enable_timestamp();
8161
8162 security_sk_clone(sk, newsk);
8163 }
8164
8165 static inline void sctp_copy_descendant(struct sock *sk_to,
8166 const struct sock *sk_from)
8167 {
8168 int ancestor_size = sizeof(struct inet_sock) +
8169 sizeof(struct sctp_sock) -
8170 offsetof(struct sctp_sock, auto_asconf_list);
8171
8172 if (sk_from->sk_family == PF_INET6)
8173 ancestor_size += sizeof(struct ipv6_pinfo);
8174
8175 __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
8176 }
8177
8178 /* Populate the fields of the newsk from the oldsk and migrate the assoc
8179 * and its messages to the newsk.
8180 */
8181 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
8182 struct sctp_association *assoc,
8183 enum sctp_socket_type type)
8184 {
8185 struct sctp_sock *oldsp = sctp_sk(oldsk);
8186 struct sctp_sock *newsp = sctp_sk(newsk);
8187 struct sctp_bind_bucket *pp; /* hash list port iterator */
8188 struct sctp_endpoint *newep = newsp->ep;
8189 struct sk_buff *skb, *tmp;
8190 struct sctp_ulpevent *event;
8191 struct sctp_bind_hashbucket *head;
8192
8193 /* Migrate socket buffer sizes and all the socket level options to the
8194 * new socket.
8195 */
8196 newsk->sk_sndbuf = oldsk->sk_sndbuf;
8197 newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
8198 /* Brute force copy old sctp opt. */
8199 sctp_copy_descendant(newsk, oldsk);
8200
8201 /* Restore the ep value that was overwritten with the above structure
8202 * copy.
8203 */
8204 newsp->ep = newep;
8205 newsp->hmac = NULL;
8206
8207 /* Hook this new socket in to the bind_hash list. */
8208 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
8209 inet_sk(oldsk)->inet_num)];
8210 spin_lock_bh(&head->lock);
8211 pp = sctp_sk(oldsk)->bind_hash;
8212 sk_add_bind_node(newsk, &pp->owner);
8213 sctp_sk(newsk)->bind_hash = pp;
8214 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
8215 spin_unlock_bh(&head->lock);
8216
8217 /* Copy the bind_addr list from the original endpoint to the new
8218 * endpoint so that we can handle restarts properly
8219 */
8220 sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
8221 &oldsp->ep->base.bind_addr, GFP_KERNEL);
8222
8223 /* Move any messages in the old socket's receive queue that are for the
8224 * peeled off association to the new socket's receive queue.
8225 */
8226 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
8227 event = sctp_skb2event(skb);
8228 if (event->asoc == assoc) {
8229 __skb_unlink(skb, &oldsk->sk_receive_queue);
8230 __skb_queue_tail(&newsk->sk_receive_queue, skb);
8231 sctp_skb_set_owner_r_frag(skb, newsk);
8232 }
8233 }
8234
8235 /* Clean up any messages pending delivery due to partial
8236 * delivery. Three cases:
8237 * 1) No partial deliver; no work.
8238 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
8239 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
8240 */
8241 skb_queue_head_init(&newsp->pd_lobby);
8242 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
8243
8244 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
8245 struct sk_buff_head *queue;
8246
8247 /* Decide which queue to move pd_lobby skbs to. */
8248 if (assoc->ulpq.pd_mode) {
8249 queue = &newsp->pd_lobby;
8250 } else
8251 queue = &newsk->sk_receive_queue;
8252
8253 /* Walk through the pd_lobby, looking for skbs that
8254 * need to be moved to the new socket.
8255 */
8256 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
8257 event = sctp_skb2event(skb);
8258 if (event->asoc == assoc) {
8259 __skb_unlink(skb, &oldsp->pd_lobby);
8260 __skb_queue_tail(queue, skb);
8261 sctp_skb_set_owner_r_frag(skb, newsk);
8262 }
8263 }
8264
8265 /* Clear up any skbs waiting for the partial
8266 * delivery to finish.
8267 */
8268 if (assoc->ulpq.pd_mode)
8269 sctp_clear_pd(oldsk, NULL);
8270
8271 }
8272
8273 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
8274 sctp_skb_set_owner_r_frag(skb, newsk);
8275
8276 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
8277 sctp_skb_set_owner_r_frag(skb, newsk);
8278
8279 /* Set the type of socket to indicate that it is peeled off from the
8280 * original UDP-style socket or created with the accept() call on a
8281 * TCP-style socket..
8282 */
8283 newsp->type = type;
8284
8285 /* Mark the new socket "in-use" by the user so that any packets
8286 * that may arrive on the association after we've moved it are
8287 * queued to the backlog. This prevents a potential race between
8288 * backlog processing on the old socket and new-packet processing
8289 * on the new socket.
8290 *
8291 * The caller has just allocated newsk so we can guarantee that other
8292 * paths won't try to lock it and then oldsk.
8293 */
8294 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
8295 sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
8296 sctp_assoc_migrate(assoc, newsk);
8297 sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
8298
8299 /* If the association on the newsk is already closed before accept()
8300 * is called, set RCV_SHUTDOWN flag.
8301 */
8302 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
8303 newsk->sk_state = SCTP_SS_CLOSED;
8304 newsk->sk_shutdown |= RCV_SHUTDOWN;
8305 } else {
8306 newsk->sk_state = SCTP_SS_ESTABLISHED;
8307 }
8308
8309 release_sock(newsk);
8310 }
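
/* Editorial example (not part of this file): the userspace side of the
 * migration above.  Peeling an association off a one-to-many socket with
 * sctp_peeloff() (the lksctp-tools wrapper) yields a new one-to-one socket,
 * and sctp_sock_migrate() moves that association's queued messages over to
 * it.  The helper name is hypothetical.
 */
#if 0
#include <netinet/sctp.h>	/* sctp_peeloff(), sctp_assoc_t (lksctp-tools) */

static int handle_one_association(int one_to_many_fd, sctp_assoc_t id)
{
	int peeled = sctp_peeloff(one_to_many_fd, id);

	if (peeled < 0)
		return -1;

	/* 'peeled' now behaves like a TCP-style socket carrying only this
	 * association and can be read and written independently.
	 */
	return peeled;
}
#endif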
8311
8312
8313 /* This proto struct describes the ULP interface for SCTP. */
8314 struct proto sctp_prot = {
8315 .name = "SCTP",
8316 .owner = THIS_MODULE,
8317 .close = sctp_close,
8318 .connect = sctp_connect,
8319 .disconnect = sctp_disconnect,
8320 .accept = sctp_accept,
8321 .ioctl = sctp_ioctl,
8322 .init = sctp_init_sock,
8323 .destroy = sctp_destroy_sock,
8324 .shutdown = sctp_shutdown,
8325 .setsockopt = sctp_setsockopt,
8326 .getsockopt = sctp_getsockopt,
8327 .sendmsg = sctp_sendmsg,
8328 .recvmsg = sctp_recvmsg,
8329 .bind = sctp_bind,
8330 .backlog_rcv = sctp_backlog_rcv,
8331 .hash = sctp_hash,
8332 .unhash = sctp_unhash,
8333 .get_port = sctp_get_port,
8334 .obj_size = sizeof(struct sctp_sock),
8335 .sysctl_mem = sysctl_sctp_mem,
8336 .sysctl_rmem = sysctl_sctp_rmem,
8337 .sysctl_wmem = sysctl_sctp_wmem,
8338 .memory_pressure = &sctp_memory_pressure,
8339 .enter_memory_pressure = sctp_enter_memory_pressure,
8340 .memory_allocated = &sctp_memory_allocated,
8341 .sockets_allocated = &sctp_sockets_allocated,
8342 };
8343
8344 #if IS_ENABLED(CONFIG_IPV6)
8345
8346 #include <net/transp_v6.h>
8347 static void sctp_v6_destroy_sock(struct sock *sk)
8348 {
8349 sctp_destroy_sock(sk);
8350 inet6_destroy_sock(sk);
8351 }
8352
8353 struct proto sctpv6_prot = {
8354 .name = "SCTPv6",
8355 .owner = THIS_MODULE,
8356 .close = sctp_close,
8357 .connect = sctp_connect,
8358 .disconnect = sctp_disconnect,
8359 .accept = sctp_accept,
8360 .ioctl = sctp_ioctl,
8361 .init = sctp_init_sock,
8362 .destroy = sctp_v6_destroy_sock,
8363 .shutdown = sctp_shutdown,
8364 .setsockopt = sctp_setsockopt,
8365 .getsockopt = sctp_getsockopt,
8366 .sendmsg = sctp_sendmsg,
8367 .recvmsg = sctp_recvmsg,
8368 .bind = sctp_bind,
8369 .backlog_rcv = sctp_backlog_rcv,
8370 .hash = sctp_hash,
8371 .unhash = sctp_unhash,
8372 .get_port = sctp_get_port,
8373 .obj_size = sizeof(struct sctp6_sock),
8374 .sysctl_mem = sysctl_sctp_mem,
8375 .sysctl_rmem = sysctl_sctp_rmem,
8376 .sysctl_wmem = sysctl_sctp_wmem,
8377 .memory_pressure = &sctp_memory_pressure,
8378 .enter_memory_pressure = sctp_enter_memory_pressure,
8379 .memory_allocated = &sctp_memory_allocated,
8380 .sockets_allocated = &sctp_sockets_allocated,
8381 };
8382 #endif /* IS_ENABLED(CONFIG_IPV6) */