#ifndef _DCCP_H
#define _DCCP_H
/*
 *  net/dccp/dccp.h
 *
 *  An implementation of the DCCP protocol
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/ktime.h>
#include <net/snmp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include "ackvec.h"

/*
 * DCCP - specific warning and debugging macros.
 */
#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \
                                            __func__, ##a)
#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
                                    __FILE__, __LINE__, __func__)
#define DCCP_BUG(a...)       do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
#define DCCP_BUG_ON(cond)    do { if (unlikely((cond) != 0))               \
                                     DCCP_BUG("\"%s\" holds (exception!)", \
                                              __stringify(cond));          \
                             } while (0)

#define DCCP_PRINTK(enable, fmt, args...)       do { if (enable)             \
                                                        printk(fmt, ##args); \
                                                } while(0)
#define DCCP_PR_DEBUG(enable, fmt, a...)        DCCP_PRINTK(enable, KERN_DEBUG \
                                                  "%s: " fmt, __func__, ##a)

#ifdef CONFIG_IP_DCCP_DEBUG
extern bool dccp_debug;
#define dccp_pr_debug(format, a...)       DCCP_PR_DEBUG(dccp_debug, format, ##a)
#define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
#define dccp_debug(fmt, a...)             dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
#else
#define dccp_pr_debug(format, a...)
#define dccp_pr_debug_cat(format, a...)
#define dccp_debug(format, a...)
#endif

extern struct inet_hashinfo dccp_hashinfo;

extern struct percpu_counter dccp_orphan_count;

extern void dccp_time_wait(struct sock *sk, int state, int timeo);

/*
 *  Set safe upper bounds for header and option length. Since Data Offset is 8
 *  bits (RFC 4340, sec. 5.1), the total header length can never be more than
 *  4 * 255 = 1020 bytes. The largest possible header length is 28 bytes (X=1):
 *    - DCCP-Response with ACK Subheader and 4 bytes of Service code      OR
 *    - DCCP-Reset    with ACK Subheader and 4 bytes of Reset Code fields
 *  Hence a safe upper bound for the maximum option length is 1020-28 = 992
 */
#define MAX_DCCP_SPECIFIC_HEADER (255 * sizeof(uint32_t))
#define DCCP_MAX_PACKET_HDR 28
#define DCCP_MAX_OPT_LEN (MAX_DCCP_SPECIFIC_HEADER - DCCP_MAX_PACKET_HDR)
#define MAX_DCCP_HEADER (MAX_DCCP_SPECIFIC_HEADER + MAX_HEADER)

/* Upper bound for initial feature-negotiation overhead (padded to 32 bits) */
#define DCCP_FEATNEG_OVERHEAD   (32 * sizeof(uint32_t))

#define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT
                                     * state, about 60 seconds */

/* RFC 1122, 4.2.3.1 initial RTO value */
#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ))

/*
 * The maximum back-off value for retransmissions. This is needed for
 *  - retransmitting client-Requests (sec. 8.1.1),
 *  - retransmitting Close/CloseReq when closing (sec. 8.3),
 *  - feature-negotiation retransmission (sec. 6.6.3),
 *  - Acks in client-PARTOPEN state (sec. 8.1.5).
 */
#define DCCP_RTO_MAX ((unsigned int)(64 * HZ))

/*
 * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
 */
#define DCCP_SANE_RTT_MIN       100
#define DCCP_FALLBACK_RTT       (USEC_PER_SEC / 5)
#define DCCP_SANE_RTT_MAX       (3 * USEC_PER_SEC)

/* sysctl variables for DCCP */
extern int sysctl_dccp_request_retries;
extern int sysctl_dccp_retries1;
extern int sysctl_dccp_retries2;
extern int sysctl_dccp_tx_qlen;
extern int sysctl_dccp_sync_ratelimit;

/*
 * 48-bit sequence number arithmetic (signed and unsigned)
 */
#define INT48_MIN        0x800000000000LL               /* 2^47 */
#define UINT48_MAX       0xFFFFFFFFFFFFLL               /* 2^48 - 1 */
#define COMPLEMENT48(x)  (0x1000000000000LL - (x))      /* 2^48 - x */
#define TO_SIGNED48(x)   (((x) < INT48_MIN)? (x) : -COMPLEMENT48( (x)))
#define TO_UNSIGNED48(x) (((x) >= 0)? (x) : COMPLEMENT48(-(x)))
#define ADD48(a, b)      (((a) + (b)) & UINT48_MAX)
#define SUB48(a, b)      ADD48((a), COMPLEMENT48(b))
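/*
 * For illustration, all of the above wrap modulo 2^48:
 *      ADD48(UINT48_MAX, 1) == 0,
 *      SUB48(0, 1)          == UINT48_MAX,
 *      SUB48(1, UINT48_MAX) == 2       (1 is two steps past UINT48_MAX).
 */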

static inline void dccp_set_seqno(u64 *seqno, u64 value)
{
        *seqno = value & UINT48_MAX;
}

static inline void dccp_inc_seqno(u64 *seqno)
{
        *seqno = ADD48(*seqno, 1);
}

/* signed mod-2^48 distance: pos. if seqno1 < seqno2, neg. if seqno1 > seqno2 */
static inline s64 dccp_delta_seqno(const u64 seqno1, const u64 seqno2)
{
        u64 delta = SUB48(seqno2, seqno1);

        return TO_SIGNED48(delta);
}

/* is seq1 < seq2 ? */
static inline int before48(const u64 seq1, const u64 seq2)
{
        return (s64)((seq2 << 16) - (seq1 << 16)) > 0;
}

/* is seq1 > seq2 ? */
#define after48(seq1, seq2)     before48(seq2, seq1)

/* is seq2 <= seq1 <= seq3 ? */
static inline int between48(const u64 seq1, const u64 seq2, const u64 seq3)
{
        return (seq3 << 16) - (seq2 << 16) >= (seq1 << 16) - (seq2 << 16);
}

static inline u64 max48(const u64 seq1, const u64 seq2)
{
        return after48(seq1, seq2) ? seq1 : seq2;
}
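/*
 * For illustration: shifting the 48-bit values into the top bits of a u64
 * makes the subtraction wrap exactly at 2^48, so the circular ordering falls
 * out of ordinary 64-bit arithmetic, e.g.
 *      before48(UINT48_MAX, 1)     == 1        (1 lies just past the wrap),
 *      after48(0, UINT48_MAX)      == 1,
 *      between48(0, UINT48_MAX, 1) == 1.
 */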

/**
 * dccp_loss_count - Approximate the number of lost data packets in a burst loss
 * @s1:  last known sequence number before the loss ('hole')
 * @s2:  first sequence number seen after the 'hole'
 * @ndp: NDP count on packet with sequence number @s2
 */
static inline u64 dccp_loss_count(const u64 s1, const u64 s2, const u64 ndp)
{
        s64 delta = dccp_delta_seqno(s1, s2);

        WARN_ON(delta < 0);
        delta -= ndp + 1;

        return delta > 0 ? delta : 0;
}
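/*
 * For illustration: with s1 = 10 and s2 = 14 the hole spans seqnos 11..13, so
 * an NDP count of 0 on s2 yields dccp_loss_count() == 3; an NDP count of 3
 * means the hole held only non-data packets and yields 0 (no data loss).
 */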

/**
 * dccp_loss_free - Evaluate condition for data loss from RFC 4340, 7.7.1
 */
static inline bool dccp_loss_free(const u64 s1, const u64 s2, const u64 ndp)
{
        return dccp_loss_count(s1, s2, ndp) == 0;
}

enum {
        DCCP_MIB_NUM = 0,
        DCCP_MIB_ACTIVEOPENS,                   /* ActiveOpens */
        DCCP_MIB_ESTABRESETS,                   /* EstabResets */
        DCCP_MIB_CURRESTAB,                     /* CurrEstab */
        DCCP_MIB_OUTSEGS,                       /* OutSegs */
        DCCP_MIB_OUTRSTS,
        DCCP_MIB_ABORTONTIMEOUT,
        DCCP_MIB_TIMEOUTS,
        DCCP_MIB_ABORTFAILED,
        DCCP_MIB_PASSIVEOPENS,
        DCCP_MIB_ATTEMPTFAILS,
        DCCP_MIB_OUTDATAGRAMS,
        DCCP_MIB_INERRS,
        DCCP_MIB_OPTMANDATORYERROR,
        DCCP_MIB_INVALIDOPT,
        __DCCP_MIB_MAX
};

#define DCCP_MIB_MAX    __DCCP_MIB_MAX
struct dccp_mib {
        unsigned long   mibs[DCCP_MIB_MAX];
};

DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
#define DCCP_INC_STATS(field)     SNMP_INC_STATS(dccp_statistics, field)
#define DCCP_INC_STATS_BH(field)  SNMP_INC_STATS_BH(dccp_statistics, field)
#define DCCP_DEC_STATS(field)     SNMP_DEC_STATS(dccp_statistics, field)

/*
 * Checksumming routines
 */
static inline unsigned int dccp_csum_coverage(const struct sk_buff *skb)
{
        const struct dccp_hdr* dh = dccp_hdr(skb);

        if (dh->dccph_cscov == 0)
                return skb->len;
        return (dh->dccph_doff + dh->dccph_cscov - 1) * sizeof(u32);
}

static inline void dccp_csum_outgoing(struct sk_buff *skb)
{
        unsigned int cov = dccp_csum_coverage(skb);

        if (cov >= skb->len)
                dccp_hdr(skb)->dccph_cscov = 0;

        skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
}
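/*
 * For illustration (RFC 4340, sec. 9): CsCov == 0 means the checksum covers
 * the whole packet, while CsCov == n in 1..15 covers the DCCP header
 * (including options) plus the initial (n-1)*4 bytes of application data.
 * E.g. dccph_doff == 7 (28-byte header) and dccph_cscov == 3 give a coverage
 * of (7 + 3 - 1) * 4 == 36 bytes.
 */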

extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);

extern int  dccp_retransmit_skb(struct sock *sk);

extern void dccp_send_ack(struct sock *sk);
extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                struct request_sock *rsk);

extern void dccp_send_sync(struct sock *sk, const u64 seq,
                           const enum dccp_pkt_type pkt_type);

/*
 * TX Packet Dequeueing Interface
 */
extern void             dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
extern bool             dccp_qpolicy_full(struct sock *sk);
extern void             dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
extern struct sk_buff   *dccp_qpolicy_top(struct sock *sk);
extern struct sk_buff   *dccp_qpolicy_pop(struct sock *sk);
extern bool             dccp_qpolicy_param_ok(struct sock *sk, __be32 param);

/*
 * TX Packet Output and TX Timers
 */
extern void dccp_write_xmit(struct sock *sk);
extern void dccp_write_space(struct sock *sk);
extern void dccp_flush_write_queue(struct sock *sk, long *time_budget);

extern void dccp_init_xmit_timers(struct sock *sk);
static inline void dccp_clear_xmit_timers(struct sock *sk)
{
        inet_csk_clear_xmit_timers(sk);
}

extern unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);

extern const char *dccp_packet_name(const int type);

extern void dccp_set_state(struct sock *sk, const int state);
extern void dccp_done(struct sock *sk);

extern int  dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
                            struct sk_buff const *skb);

extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock *dccp_create_openreq_child(struct sock *sk,
                                              const struct request_sock *req,
                                              const struct sk_buff *skb);

extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern struct sock *dccp_v4_request_recv_sock(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req,
                                              struct dst_entry *dst);
extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
                                   struct request_sock *req,
                                   struct request_sock **prev);

extern int dccp_child_process(struct sock *parent, struct sock *child,
                              struct sk_buff *skb);
extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                  struct dccp_hdr *dh, unsigned int len);
extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                const struct dccp_hdr *dh, const unsigned int len);

extern int  dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
extern void dccp_destroy_sock(struct sock *sk);

extern void dccp_close(struct sock *sk, long timeout);
extern struct sk_buff *dccp_make_response(struct sock *sk,
                                          struct dst_entry *dst,
                                          struct request_sock *req);

extern int         dccp_connect(struct sock *sk);
extern int         dccp_disconnect(struct sock *sk, int flags);
extern int         dccp_getsockopt(struct sock *sk, int level, int optname,
                                   char __user *optval, int __user *optlen);
extern int         dccp_setsockopt(struct sock *sk, int level, int optname,
                                   char __user *optval, unsigned int optlen);
#ifdef CONFIG_COMPAT
extern int         compat_dccp_getsockopt(struct sock *sk,
                                          int level, int optname,
                                          char __user *optval, int __user *optlen);
extern int         compat_dccp_setsockopt(struct sock *sk,
                                          int level, int optname,
                                          char __user *optval, unsigned int optlen);
#endif
extern int         dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int         dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
                                struct msghdr *msg, size_t size);
extern int         dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
                                struct msghdr *msg, size_t len, int nonblock,
                                int flags, int *addr_len);
extern void        dccp_shutdown(struct sock *sk, int how);
extern int         inet_dccp_listen(struct socket *sock, int backlog);
extern unsigned int dccp_poll(struct file *file, struct socket *sock,
                              poll_table *wait);
extern int         dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
                                   int addr_len);

extern struct sk_buff *dccp_ctl_make_reset(struct sock *sk,
                                           struct sk_buff *skb);
extern int         dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
extern void        dccp_send_close(struct sock *sk, const int active);
extern int         dccp_invalid_packet(struct sk_buff *skb);
extern u32         dccp_sample_rtt(struct sock *sk, long delta);

static inline int dccp_bad_service_code(const struct sock *sk,
                                        const __be32 service)
{
        const struct dccp_sock *dp = dccp_sk(sk);

        if (dp->dccps_service == service)
                return 0;
        return !dccp_list_has_service(dp->dccps_service_list, service);
}

/**
 * dccp_skb_cb - DCCP per-packet control information
 * @dccpd_type: one of %dccp_pkt_type (or unknown)
 * @dccpd_ccval: CCVal field (5.1), see e.g. RFC 4342, 8.1
 * @dccpd_reset_code: one of %dccp_reset_codes
 * @dccpd_reset_data: Data1..3 fields (depend on @dccpd_reset_code)
 * @dccpd_opt_len: total length of all options (5.8) in the packet
 * @dccpd_seq: sequence number
 * @dccpd_ack_seq: acknowledgment number subheader field value
 *
 * This is used for transmission as well as for reception.
 */
struct dccp_skb_cb {
        union {
                struct inet_skb_parm    h4;
#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
#endif
        } header;
        __u8  dccpd_type:4;
        __u8  dccpd_ccval:4;
        __u8  dccpd_reset_code,
              dccpd_reset_data[3];
        __u16 dccpd_opt_len;
        __u64 dccpd_seq;
        __u64 dccpd_ack_seq;
};

#define DCCP_SKB_CB(__skb) ((struct dccp_skb_cb *)&((__skb)->cb[0]))

/* RFC 4340, sec. 7.7 */
static inline int dccp_non_data_packet(const struct sk_buff *skb)
{
        const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

        return type == DCCP_PKT_ACK      ||
               type == DCCP_PKT_CLOSE    ||
               type == DCCP_PKT_CLOSEREQ ||
               type == DCCP_PKT_RESET    ||
               type == DCCP_PKT_SYNC     ||
               type == DCCP_PKT_SYNCACK;
}

/* RFC 4340, sec. 7.7 */
static inline int dccp_data_packet(const struct sk_buff *skb)
{
        const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

        return type == DCCP_PKT_DATA     ||
               type == DCCP_PKT_DATAACK  ||
               type == DCCP_PKT_REQUEST  ||
               type == DCCP_PKT_RESPONSE;
}

static inline int dccp_packet_without_ack(const struct sk_buff *skb)
{
        const __u8 type = DCCP_SKB_CB(skb)->dccpd_type;

        return type == DCCP_PKT_DATA || type == DCCP_PKT_REQUEST;
}

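/* A value outside the valid 48-bit sequence number space, used to mark
 * dccpd_ack_seq when the packet carries no Acknowledgement Number. */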
#define DCCP_PKT_WITHOUT_ACK_SEQ (UINT48_MAX << 2)

static inline void dccp_hdr_set_seq(struct dccp_hdr *dh, const u64 gss)
{
        struct dccp_hdr_ext *dhx = (struct dccp_hdr_ext *)((void *)dh +
                                                           sizeof(*dh));
        dh->dccph_seq2 = 0;
        dh->dccph_seq = htons((gss >> 32) & 0xfffff);
        dhx->dccph_seq_low = htonl(gss & 0xffffffff);
}
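/*
 * For illustration: the 48-bit GSS is split across the short sequence number
 * field and the extended (X=1) low word, e.g. gss == 0x123456789abc stores
 * htons(0x1234) in dccph_seq and htonl(0x56789abc) in dccph_seq_low.
 */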

static inline void dccp_hdr_set_ack(struct dccp_hdr_ack_bits *dhack,
                                    const u64 gsr)
{
        dhack->dccph_reserved1 = 0;
        dhack->dccph_ack_nr_high = htons(gsr >> 32);
        dhack->dccph_ack_nr_low = htonl(gsr & 0xffffffff);
}

static inline void dccp_update_gsr(struct sock *sk, u64 seq)
{
        struct dccp_sock *dp = dccp_sk(sk);

        if (after48(seq, dp->dccps_gsr))
                dp->dccps_gsr = seq;
        /* Sequence validity window depends on remote Sequence Window (7.5.1) */
        dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
        /*
         * Adjust SWL so that it is not below ISR. In contrast to RFC 4340,
         * 7.5.1 we perform this check beyond the initial handshake: W/W' are
         * always > 32, so for the first W/W' packets in the lifetime of a
         * connection we always have to adjust SWL.
         * A second reason why we are doing this is that the window depends on
         * the feature-remote value of Sequence Window: nothing stops the peer
         * from updating this value while we are busy adjusting SWL for the
         * first W packets (we would have to count from scratch again then).
         * Therefore it is safer to always make sure that the Sequence Window
         * is not artificially extended by a peer who grows SWL downwards by
         * continually updating the feature-remote Sequence-Window.
         * If sequence numbers wrap it is bad luck. But that will take a while
         * (48 bit), and this measure prevents Sequence-number attacks.
         */
        if (before48(dp->dccps_swl, dp->dccps_isr))
                dp->dccps_swl = dp->dccps_isr;
        dp->dccps_swh = ADD48(dp->dccps_gsr, (3 * dp->dccps_r_seq_win) / 4);
}
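/*
 * For illustration (RFC 4340, 7.5.1): with GSR == 100 and a remote Sequence
 * Window of 100, the above gives SWL == 100 + 1 - 25 == 76 and
 * SWH == 100 + 75 == 175, with SWL subsequently clamped so it never drops
 * below ISR.
 */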

static inline void dccp_update_gss(struct sock *sk, u64 seq)
{
        struct dccp_sock *dp = dccp_sk(sk);

        dp->dccps_gss = seq;
        /* Ack validity window depends on local Sequence Window value (7.5.1) */
        dp->dccps_awl = SUB48(ADD48(dp->dccps_gss, 1), dp->dccps_l_seq_win);
        /* Adjust AWL so that it is not below ISS - see comment above for SWL */
        if (before48(dp->dccps_awl, dp->dccps_iss))
                dp->dccps_awl = dp->dccps_iss;
        dp->dccps_awh = dp->dccps_gss;
}
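/*
 * For illustration: with GSS == 200 and a local Sequence Window of 100, the
 * above gives AWL == 200 + 1 - 100 == 101 and AWH == GSS == 200; incoming
 * Acknowledgement Numbers are expected to fall within [AWL, AWH].
 */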

static inline int dccp_ackvec_pending(const struct sock *sk)
{
        return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
               !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
}

static inline int dccp_ack_pending(const struct sock *sk)
{
        return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
}

extern int  dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
extern int  dccp_feat_finalise_settings(struct dccp_sock *dp);
extern int  dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
extern int  dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
                                  struct sk_buff *skb);
extern int  dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
extern void dccp_feat_list_purge(struct list_head *fn_list);

extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
extern u32 dccp_timestamp(void);
extern void dccp_timestamping_init(void);
extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
                              const void *value, unsigned char len);

#ifdef CONFIG_SYSCTL
extern int dccp_sysctl_init(void);
extern void dccp_sysctl_exit(void);
#else
static inline int dccp_sysctl_init(void)
{
        return 0;
}

static inline void dccp_sysctl_exit(void)
{
}
#endif

#endif /* _DCCP_H */