/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}

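/* Worked example of the skip rules above (explanatory note, not part of the
 * draft text):  with CHANGEOVER_ACTIVE set and CYCLING_CHANGEOVER clear, a
 * SACK that yields count_of_newacks == 2 means step 3.1-D suppresses the
 * missing report for any TSN sent to a non-primary transport; with
 * count_of_newacks == 1 and no new ack seen on the TSN's own destination,
 * step 3.1-F suppresses it instead.
 */
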
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

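	/* fast_rtx flags that a fast retransmit is currently being
	 * serviced, empty tracks whether anything is queued or in
	 * flight, and cork defers flushing until sctp_outq_uncork().
	 */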
	q->fast_rtx = 0;
	q->outstanding_bytes = 0;
	q->empty = 1;
	q->cork = 0;
	q->out_qlen = 0;
}

/* Free the outqueue structure and any related pending chunks.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			    transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed. */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}

/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}

/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
					  q, chunk, chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
			else
				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
			q->empty = 0;
			break;
		}
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}

/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}

/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it as 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted.
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX &&
		     (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements.  Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue.  The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __func__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);
}

/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}

/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	int fast_rtx;
	int error = 0;
	int timer = 0;
	int done = 0;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

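	/* fast_rtx is sampled once here; the hint is cleared at the
	 * bottom of this function once the fast retransmit has been
	 * serviced.
	 */
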
	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this value
	 * K.  Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore non-fast_retransmit
		 * chunks.
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA because of nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->empty = 0;
			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}

/* Uncork the outqueue:  clear the cork and flush whatever has queued up
 * in the meantime.
 */
int sctp_outq_uncork(struct sctp_outq *q)
{
	if (q->cork)
		q->cork = 0;

	return sctp_outq_flush(q, 0);
}

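/* Corking, illustratively (an explanatory sketch, not a new API):  a caller
 * raises q->cork, queues several chunks with sctp_outq_tail() -- which skips
 * the flush while corked -- and then calls sctp_outq_uncork() once so the
 * chunks can be bundled into as few packets as possible:
 *
 *	q->cork = 1;
 *	sctp_outq_tail(q, chunk1);	queued only
 *	sctp_outq_tail(q, chunk2);	queued only
 *	sctp_outq_uncork(q);		one flush for both
 */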

/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;	/* Limit "response" chunks to one packet. */

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		/* RFC 5061, 5.3
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED) ||
			   (new_transport->state == SCTP_PF)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
			/* fall through */

		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else {
				asoc->stats.octrlchunks++;
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
					sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
				goto sctp_flush_out;
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);

		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED) ||
			     (new_transport->state == SCTP_PF)))
				new_transport = asoc->peer.active_path;
			if (new_transport->state == SCTP_UNCONFIRMED)
				continue;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
					  q, chunk,
					  chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(
						  chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
					  "%p skb->users %d.\n",
					  ntohl(chunk->subh.data_hdr->tsn),
					  chunk->skb ? chunk->skb->head : NULL,
					  chunk->skb ?
					  atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
						  "not transmit TSN: 0x%x, status: %d\n",
						  ntohl(chunk->subh.data_hdr->tsn),
						  status);
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				/* The sender is in the SHUTDOWN-PENDING state,
				 * The sender MAY set the I-bit in the DATA
				 * chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
					asoc->stats.ouodchunks++;
				else
					asoc->stats.oodchunks++;

				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing. */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}

	return error;
}

/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

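	/* next_tsn is the next TSN this association will assign and
	 * ctsn_ack_point the highest TSN cumulatively acked, so their
	 * difference minus one counts everything still in flight; each
	 * Gap Ack Block then subtracts the (end - start + 1) TSNs that
	 * it acknowledges.
	 */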
	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}

/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;	/* Did the Cumulative TSN Ack Point advance? */

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;

	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					    transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack,
			       &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {
		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */
	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
			  __func__, sack_ctsn);
	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
			  "%p is 0x%x. Adv peer ack point: 0x%x\n",
			  __func__, asoc, ctsn, asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
finish:
	return q->empty;
}

/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;

	/* These state variables are for coherent debug output. --xguo */

#if SCTP_DEBUG
	__u32 dbg_ack_tsn = 0;	     /* An ACKed TSN range starts here... */
	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	          */
	__u32 dbg_kept_tsn = 0;	     /* An un-ACKed range starts here...  */
	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	          */

	/* 0 : The last TSN was ACKed.
	 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
	 * -1: We need to initialize.
	 */
	int dbg_prt_state = -1;
#endif /* SCTP_DEBUG */

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while ((lchunk = sctp_list_dequeue(transmitted_queue)) != NULL) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (!tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;

				if (!tchunk->tsn_gap_acked) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}

#if SCTP_DEBUG
			switch (dbg_prt_state) {
			case 0:	/* last TSN was ACKed */
				if (dbg_last_ack_tsn + 1 == tsn) {
					/* This TSN belongs to the
					 * current ACK range.
					 */
					break;
				}

				if (dbg_last_ack_tsn != dbg_ack_tsn) {
					/* Display the end of the
					 * current range.
					 */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				}

				/* Start a new range.  */
				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_ack_tsn = tsn;
				break;

			case 1:	/* The last TSN was NOT ACKed. */
				if (dbg_last_kept_tsn != dbg_kept_tsn) {
					/* Display the end of current range. */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);
				}

				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				/* This is the first-ever TSN we examined.  */
				/* Start a new range of ACK-ed TSNs.  */
				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
				dbg_prt_state = 0;
				dbg_ack_tsn = tsn;
			}

			dbg_last_ack_tsn = tsn;
#endif /* SCTP_DEBUG */

		} else {
			if (tchunk->tsn_gap_acked) {
				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
						  "data TSN: 0x%x\n",
						  __func__, tsn);
				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);

#if SCTP_DEBUG
			/* See the above comments on ACK-ed TSNs. */
			switch (dbg_prt_state) {
			case 1:
				if (dbg_last_kept_tsn + 1 == tsn)
					break;

				if (dbg_last_kept_tsn != dbg_kept_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);

				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_kept_tsn = tsn;
				break;

			case 0:
				if (dbg_last_ack_tsn != dbg_ack_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
				dbg_prt_state = 1;
				dbg_kept_tsn = tsn;
			}

			dbg_last_kept_tsn = tsn;
#endif /* SCTP_DEBUG */
		}
	}

#if SCTP_DEBUG
	/* Finish off the last range, displaying its ending TSN.  */
	switch (dbg_prt_state) {
	case 0:
		if (dbg_last_ack_tsn != dbg_ack_tsn) {
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
		} else {
			SCTP_DEBUG_PRINTK_CONT("\n");
		}
		break;

	case 1:
		if (dbg_last_kept_tsn != dbg_kept_tsn) {
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
		} else {
			SCTP_DEBUG_PRINTK_CONT("\n");
		}
	}
#endif /* SCTP_DEBUG */
	if (transport) {
		if (bytes_acked) {
			struct sctp_association *asoc = transport->asoc;

			/* We may have counted DATA that was migrated
			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
			 * credited to this transport.
			 */
			bytes_acked -= migrate_bytes;

			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;

			/*
			 * While in SHUTDOWN PENDING, we may have started
			 * the T5 shutdown guard timer after reaching the
			 * retransmission limit. Stop that timer as soon
			 * as the receiver acknowledged any data.
			 */
			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
			    del_timer(&asoc->timers
				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
					sctp_association_put(asoc);

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE ||
			     transport->state == SCTP_UNCONFIRMED) &&
			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 *
			 * Allow the association to timeout while in SHUTDOWN
			 * PENDING or SHUTDOWN RECEIVED in case the receiver
			 * stays in zero window mode forever.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn + 2 == q->asoc->next_tsn) &&
			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
						  "window probe: %u\n",
						  __func__, sack_ctsn);
				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (del_timer(&transport->T3_rtx_timer))
				sctp_transport_put(transport);
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}

/* Mark chunks as missing; as a consequence they may later be
 * retransmitted.
 */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__func__, tsn,
					chunk->tsn_missing_report);
			}
		}

		/* M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report' value larger than or equal to 3,
		 * mark that chunk for retransmission and start the fast
		 * retransmit procedure.
		 */
		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __func__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}

/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks.  They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field.  All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

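	/* Gap Ack Block bounds are 16-bit offsets relative to the
	 * Cumulative TSN Ack, so convert tsn to the same offset form.
	 * Unsigned wraparound keeps the subtraction correct across the
	 * 32-bit TSN rollover: e.g. ctsn 0xffffffff and tsn 2 give gap 3.
	 */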
	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}

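/* Return the index of the first skiplist entry recorded for 'stream', or
 * nskips if the stream has no entry yet; the caller overwrites the match
 * or appends at that position.
 */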
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}

/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];	/* at most 10 skips */
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

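	/* Roadmap (per the PR-SCTP rules quoted below):  C1 pulls
	 * Advanced.Peer.Ack.Point up to the SACK's cumulative ack, C2
	 * advances it further across consecutively abandoned TSNs, and
	 * C3 queues a FORWARD TSN chunk if the point moved beyond ctsn.
	 */
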
	if (!asoc->peer.prsctp_capable)
		return;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing         local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN.  This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues.  Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported.  If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}