/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"

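/*
 * We have just sent an ACK, so the delayed-ACK timer has nothing left
 * to do: clear it.
 */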
static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const int dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                          dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

                /*
                 * FIXME: study DCCP_PKT_SYNC[ACK] to see what is the right
                 * thing to do here...
                 */
                dccp_inc_seqno(&dp->dccps_gss);

                dcb->dccpd_seq = dp->dccps_gss;
                dccp_insert_options(sk, skb);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        break;
                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_seq;
                        break;
                }

                skb->h.raw = skb_push(skb, dccp_header_size);
                dh = dccp_hdr(skb);
                /*
                 * Data packets are not cloned as they are never retransmitted
                 */
                if (skb_cloned(skb))
                        skb_set_owner_w(skb, sk);

                /* Build DCCP header and checksum it. */
                memset(dh, 0, dccp_header_size);
                dh->dccph_type = dcb->dccpd_type;
                dh->dccph_sport = inet->sport;
                dh->dccph_dport = inet->dport;
                dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x = 1;

                dp->dccps_awh = dp->dccps_gss;
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dcb->dccpd_service;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
                                                      inet->daddr);

                if (dcb->dccpd_type == DCCP_PKT_ACK ||
                    dcb->dccpd_type == DCCP_PKT_DATAACK)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                err = ip_queue_xmit(skb, 0);
                if (err <= 0)
                        return err;

                /* NET_XMIT_CN is special. It does not guarantee that this
                 * packet is lost. It tells us that the device is about to
                 * start dropping packets, or already drops some packets of
                 * the same priority, and asks us to send less aggressively.
                 */
                return err == NET_XMIT_CN ? 0 : err;
        }
        return -ENOBUFS;
}

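/*
 * Recompute the MSS for the given path MTU and cache both values in the
 * dccp_sock (dccps_pmtu_cookie, dccps_mss_cache).
 */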
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct dccp_sock *dp = dccp_sk(sk);
        int mss_now;

        /*
         * FIXME: we really should be using the af_specific thing to support
         * IPv6.
         * mss_now = pmtu - tp->af_specific->net_header_len -
         *           sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
         */
        mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) -
                  sizeof(struct dccp_hdr_ext);

        /* Now subtract optional transport overhead */
        mss_now -= dp->dccps_ext_header_len;

        /*
         * FIXME: this should come from the CCID infrastructure, where, say,
         * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now let's
         * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
         * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
         * make it a multiple of 4
         */

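        /* 5 + 6 + 10 + 6 + 6 + 6 = 39 option bytes; adding 3 and truncating
         * to a multiple of 4 rounds the reservation up to 40 bytes. */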
        mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;

        /* And store cached results */
        dp->dccps_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = mss_now;

        return mss_now;
}

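/*
 * Transmit one data skb: ask the tx CCID whether the packet may be sent now
 * (a return of 0 from ccid_hc_tx_send_packet()); if so, stamp it as DATA or
 * DATAACK, transmit it and notify the CCID that it went out.
 */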
int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, const int len)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, len);

        if (err == 0) {
                const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

                if (sk->sk_state == DCCP_PARTOPEN) {
                        /* See 8.1.5. Handshake Completion */
                        inet_csk_schedule_ack(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  inet_csk(sk)->icsk_rto,
                                                  DCCP_RTO_MAX);
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                        /*
                         * FIXME: we really should have a
                         * dccps_ack_pending or use icsk.
                         */
                } else if (inet_csk_ack_scheduled(sk) ||
                           (dp->dccps_options.dccpo_send_ack_vector &&
                            ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 &&
                            ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1))
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                else
                        dcb->dccpd_type = DCCP_PKT_DATA;

                err = dccp_transmit_skb(sk, skb);
                ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
        }

        return err;
}

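/*
 * Retransmit an skb still held on the write queue. Rebuild the header in
 * case the route changed, and transmit a private copy so the queued
 * original is preserved: pskb_copy() if the skb is cloned, skb_clone()
 * otherwise.
 */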
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (inet_sk_rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

        return dccp_transmit_skb(sk, (skb_cloned(skb) ?
                                      pskb_copy(skb, GFP_ATOMIC) :
                                      skb_clone(skb, GFP_ATOMIC)));
}

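/*
 * Build a RESPONSE packet answering a REQUEST: allocate an skb, insert the
 * options and the DCCP header using the initial sequence numbers recorded
 * in the request sock, checksum it and hand it back to the caller.
 */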
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                               dccp_header_size, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq = dccp_rsk(req)->dreq_iss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff = (dccp_header_size +
                          DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type = DCCP_PKT_RESPONSE;
        dh->dccph_x = 1;
        dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr);

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
                                              inet_rsk(req)->rmt_addr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

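/*
 * Build a RESET packet carrying the given reset code: advance the sequence
 * number space (dccps_gss), insert options, fill in and checksum the header.
 */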
struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                const enum dccp_reset_codes code)
{
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                               dccp_header_size, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        dccp_inc_seqno(&dp->dccps_gss);

        DCCP_SKB_CB(skb)->dccpd_reset_code = code;
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_seq = dp->dccps_gss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_sk(sk)->dport;
        dh->dccph_doff = (dccp_header_size +
                          DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type = DCCP_PKT_RESET;
        dh->dccph_x = 1;
        dccp_hdr_set_seq(dh, dp->dccps_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

        dccp_hdr_reset(skb)->dccph_reset_code = code;

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
                                              inet_sk(sk)->daddr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        /*
         * FIXME: set dp->{dccps_swh,dccps_swl}, with
         * something like dccp_inc_seq
         */

        icsk->icsk_retransmits = 0;
}

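/*
 * Kick off the client side of the handshake: build and send the REQUEST
 * packet, keep it in sk_send_head for retransmission, and arm the
 * retransmit timer to repeat the REQUEST until an answer arrives.
 */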
int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct inet_connection_sock *icsk = inet_csk(sk);

        dccp_connect_init(sk);

        skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_DCCP_HEADER);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
        /* FIXME: set service to something meaningful, coming
         * from userspace */
        DCCP_SKB_CB(skb)->dccpd_service = 0;
        skb->csum = 0;
        skb_set_owner_w(skb, sk);

        BUG_TRAP(sk->sk_send_head == NULL);
        sk->sk_send_head = skb;
        dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

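/*
 * Send a pure ACK immediately. If no skb can be allocated, fall back to
 * the delayed-ACK timer so that the ACK will be retried later.
 */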
void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, MAX_DCCP_HEADER);
                skb->csum = 0;
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                skb_set_owner_w(skb, sk);
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

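/*
 * Arrange for an ACK to be sent in roughly two seconds, unless one is
 * already pending: if the delack timer is blocked the ACK goes out at once,
 * and an earlier timeout is kept in preference to the new one.
 */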
void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. elapsed time fixes the skew, so no problem
         * with using 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescent senders.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use the new timeout only if there wasn't an older one set earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If the delack timer was blocked or is about to expire,
                 * send the ACK now.
                 *
                 * FIXME: check the "about to expire" part
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

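/*
 * Send a SYNC packet; the sequence number passed in ends up in the packet's
 * acknowledgement field (see the SYNC/SYNCACK case in dccp_transmit_skb()).
 */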
void dccp_send_sync(struct sock *sk, u64 seq)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

        if (skb == NULL)
                /* FIXME: how to make sure the sync is sent? */
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, MAX_DCCP_HEADER);
        skb->csum = 0;
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_SYNC;
        DCCP_SKB_CB(skb)->dccpd_seq = seq;

        skb_set_owner_w(skb, sk);
        dccp_transmit_skb(sk, skb);
}

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;

        /* Socket is locked, keep trying until memory is available. */
        for (;;) {
                skb = alloc_skb(sk->sk_prot->max_header, GFP_KERNEL);
                if (skb != NULL)
                        break;
                yield();
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        skb->csum = 0;
        DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
                                        DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

        skb_set_owner_w(skb, sk);
        dccp_transmit_skb(sk, skb);

        ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
        ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
}