/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/gfp.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

static int sctp_cmd_interpreter(enum sctp_event event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp);
static int sctp_side_effects(enum sctp_event event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp);

/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */
	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk.  */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet.
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet.  */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *trans = asoc->peer.last_data_from;
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *	     : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}

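/* Note on the delayed-SACK policy above: lksctp normally initializes the
 * per-association and per-transport SACK frequency to 2 (it can be changed
 * with the SCTP_DELAYED_SACK socket option), so the sack_cnt >= sackfreq - 1
 * test typically forces a SACK on every second packet, matching the
 * RFC 2960 6.2 text quoted in sctp_gen_sack().
 */
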
/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error;

	/* Check whether a task is in the sock.  */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* This is an interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					enum sctp_event_timeout timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

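/* The timer callbacks below all receive the owning association in their
 * 'data' argument and simply forward to sctp_generate_timeout_event()
 * with the matching SCTP_EVENT_TIMEOUT_* type.
 */
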
static void sctp_generate_t1_cookie_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *)data;

	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	u32 elapsed, timeout;
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Check if we should still send the heartbeat or reschedule */
	elapsed = jiffies - transport->last_time_sent;
	timeout = sctp_transport_timeout(transport);
	if (elapsed < timeout) {
		elapsed = timeout - elapsed;
		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *)data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->proto_unreach_timer,
				jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

/* Handle the timeout of the RE-CONFIG timer. */
void sctp_generate_reconf_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *)data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine.  */
static void sctp_generate_sack_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *)data;

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t4_rto_event,
	sctp_generate_t5_shutdown_guard_event,
	NULL,
	NULL,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};

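/* The NULL slots above correspond to SCTP_EVENT_TIMEOUT_NONE and to the
 * timeouts that are armed per transport rather than per association
 * (T3-RTX, HEARTBEAT, RECONF); those timers carry a struct sctp_transport
 * in their callback data and have their own generate functions above.
 */
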
/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	struct net *net = sock_net(asoc->base.sk);

	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (net->sctp.pf_enable &&
	    (transport->state == SCTP_ACTIVE) &&
	    (asoc->pf_retrans < transport->pathmaxrxt) &&
	    (transport->error_count > asoc->pf_retrans)) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_transport_reset_hb_timer(transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}

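/* A worked example of the E2 backoff above, assuming the RFC 4960 default
 * timer parameters (RTO.Initial = 3 s, RTO.Max = 60 s): successive
 * expirations on the same destination yield RTOs of roughly 3 s, 6 s,
 * 12 s, 24 s, 48 s, and then stay clamped at 60 s until a new RTT
 * measurement brings the RTO back down.
 */
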
/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  enum sctp_event event_type,
				  union sctp_subtype subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
							(__u16)error, 0, 0, chunk,
							GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
							(__u16)error, 0, 0, NULL,
							GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie.  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 struct sctp_init_chunk *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		sctp_transport_reset_hb_timer(t);
}

static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to handle the reception of a HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	struct sctp_sender_hb_info *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING. If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shut down the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		sctp_transport_dst_confirm(t);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer.  */
	sctp_transport_reset_hb_timer(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}

/* Helper function to process the SACK command.  */
static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		struct net *net = sock_net(asoc->base.sk);

		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

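/* sctp_outq_sack() returns nonzero once the SACK leaves no TSNs
 * outstanding, so the SCTP_EVENT_NO_PENDING_TSN event generated here is
 * what lets the state machine move a pending graceful shutdown forward
 * once all data has been acknowledged.
 */
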
/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_association *new)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_chunk *abort;

	if (!sctp_assoc_update(asoc, new))
		return;

	abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
	if (abort) {
		sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
		sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
	}
	sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
	sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_RSRC_LOW));
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
			       struct sctp_association *asoc,
			       enum sctp_state state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			sk->sk_state = SCTP_SS_ESTABLISHED;

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED)) {
			sk->sk_state = SCTP_SS_CLOSING;
			sk->sk_shutdown |= RCV_SHUTDOWN;
		}
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}

	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
	    !sctp_outq_is_empty(&asoc->outqueue))
		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
}

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		sctp_ulpq_tail_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			struct sctp_chunkhdr *unk_chunk_hdr;

			unk_chunk_hdr = (struct sctp_chunkhdr *)
							err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
				    struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk) {
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
	}
}

/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *temp;
	struct list_head *pos;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);

	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     enum sctp_event_timeout timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static void sctp_cmd_send_msg(struct sctp_association *asoc,
			      struct sctp_datamsg *msg, gfp_t gfp)
{
	struct sctp_chunk *chunk;

	list_for_each_entry(chunk, &msg->chunks, frag_list)
		sctp_outq_tail(&asoc->outqueue, chunk, gfp);
}

/* Send the next ASCONF packet currently stored in the association.
 * This happens after the ASCONF_ACK was successfully processed.
 */
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);

	/* Send the next asconf chunk from the addip chunk
	 * queue.
	 */
	if (!list_empty(&asoc->addip_chunk_list)) {
		struct list_head *entry = asoc->addip_chunk_list.next;
		struct sctp_chunk *asconf = list_entry(entry,
						struct sctp_chunk, list);

		list_del_init(entry);

		/* Hold the chunk until an ASCONF_ACK is received. */
		sctp_chunk_hold(asconf);
		if (sctp_primitive_ASCONF(net, asoc, asconf))
			sctp_chunk_free(asconf);
		else
			asoc->addip_last_asconf = asconf;
	}
}

/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, enum sctp_event event_type,
	       union sctp_subtype subtype, enum sctp_state state,
	       struct sctp_endpoint *ep, struct sctp_association *asoc,
	       void *event_arg, gfp_t gfp)
{
	typedef const char *(printfn_t)(union sctp_subtype);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
	const struct sctp_sm_table_entry *state_fn;
	struct sctp_cmd_seq commands;
	enum sctp_disposition status;
	int error = 0;

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(enum sctp_event event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command. */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter. */
static int sctp_cmd_interpreter(enum sctp_event event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp)
{
	struct sctp_sock *sp = sctp_sk(ep->base.sk);
	struct sctp_chunk *chunk = NULL, *new_obj;
	struct sctp_packet *packet;
	struct sctp_sackhdr sackh;
	struct timer_list *timer;
	struct sctp_transport *t;
	unsigned long timeout;
	struct sctp_cmd *cmd;
	int local_cork = 0;
	int error = 0;
	int force;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *	   cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			/* Register with the endpoint.  */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			/* Delete the current association.  */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			/* Move the Cumulative TSN Ack ahead. */
			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);

			/* purge the fragmentation queue */
			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);

			/* Abort any in progress partial delivery. */
			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enable COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If the caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer.  */
			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.  */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet, gfp);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing.  */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing.  */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			/* fall through */

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			BUG_ON(!timeout);

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_INIT,
						 "INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_COOKIE,
						 "COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
						     SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.u32);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport.  */
			sctp_do_8_2_transport_strike(commands, asoc,
						     cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_transport_reset_hb_timer(t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = htonl(asoc->peer.rwnd +
					     asoc->outqueue.outstanding_bytes);
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
					 GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			sctp_outq_uncork(&asoc->outqueue, gfp);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
						GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
			break;
		case SCTP_CMD_SEND_NEXT_ASCONF:
			sctp_cmd_send_asconf(asoc);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

		case SCTP_CMD_SET_ASOC:
			if (asoc && local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

		if (error)
			break;
	}

out:
	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			sctp_outq_uncork(&asoc->outqueue, gfp);
	} else if (local_cork)
		sctp_outq_uncork(&asoc->outqueue, gfp);

	if (sp->data_ready_signalled)
		sp->data_ready_signalled = 0;

	return error;
nomem:
	error = -ENOMEM;
	goto out;
}