net/ipv4/inet_diag.c
/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	__be32 *saddr;
	__be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr saddr_storage;	/* for IPv4-mapped-IPv6 addresses */
	struct in6_addr daddr_storage;	/* for IPv4-mapped-IPv6 addresses */
#endif
};

static DEFINE_MUTEX(inet_diag_table_mutex);

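/* Find the diag handler registered for @proto, loading its module on demand.
 * The table mutex is taken here and stays held until
 * inet_diag_unlock_handler(), so the handler cannot be unregistered while a
 * request is being served.
 */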
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static inline void inet_diag_unlock_handler(
	const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

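/* Fill one netlink message describing a full (non-timewait) socket: the base
 * inet_diag_msg plus any extensions requested via req->idiag_ext.
 */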
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;

	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;

		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (icsk == NULL) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk),
			skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
}

static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, struct inet_diag_req_v2 *req,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_retrans = 0;

	r->id.idiag_if = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);

	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;

	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;

	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
					inet6_twsk((struct sock *)tw);

		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
	}
#endif

	return nlmsg_end(skb, nlh);
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
					   skb, r, portid, seq, nlmsg_flags,
					   unlh);
	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh);
}

int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
	int err;
	struct sock *sk;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	err = -EINVAL;
	if (req->sdiag_family == AF_INET) {
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		sk = inet6_lookup(net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		goto out_nosk;
	}

	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
			sizeof(struct inet_diag_meminfo) +
			sizeof(struct tcp_info) + 64, GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put((struct inet_timewait_sock *)sk);
		else
			sock_put(sk);
	}
out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}

static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

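/* Run the user-supplied filter bytecode against one socket. Each
 * inet_diag_bc_op either matches (advance by op->yes) or fails (advance by
 * op->no); the program accepts the socket iff it walks off the end with
 * len == 0.
 */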
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			struct inet_diag_hostcond *cond;
			__be32 *addr;

			cond = (struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_diag_entry entry;
	struct inet_sock *inet = inet_sk(sk);

	if (bc == NULL)
		return 1;

	entry.family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
	if (entry.family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		entry.saddr = np->rcv_saddr.s6_addr32;
		entry.daddr = np->daddr.s6_addr32;
	} else
#endif
	{
		entry.saddr = &inet->inet_rcv_saddr;
		entry.daddr = &inet->inet_daddr;
	}
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.userlocks = sk->sk_userlocks;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);

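/* Check that a conditional jump target, given as a byte count measured from
 * the end of the program, lands exactly on an instruction boundary reachable
 * by following the "yes" chain from the start of the bytecode.
 */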
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc  += op->yes;
	}
	return 0;
}

/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
			   int *min_len)
{
	int addr_len;
	struct inet_diag_hostcond *cond;

	/* Check hostcond space. */
	*min_len += sizeof(struct inet_diag_hostcond);
	if (len < *min_len)
		return false;
	cond = (struct inet_diag_hostcond *)(op + 1);

	/* Check address family and address length. */
	switch (cond->family) {
	case AF_UNSPEC:
		addr_len = 0;
		break;
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
	default:
		return false;
	}
	*min_len += addr_len;
	if (len < *min_len)
		return false;

	/* Check prefix length (in bits) vs address length (in bytes). */
	if (cond->prefix_len > 8 * addr_len)
		return false;

	return true;
}

/* Validate a port comparison operator. */
static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
					 int len, int *min_len)
{
	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
	*min_len += sizeof(struct inet_diag_bc_op);
	if (len < *min_len)
		return false;
	return true;
}

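/* Audit user-supplied filter bytecode before any dump runs it: every opcode
 * must be known, its operand must fit inside the attribute, and all yes/no
 * offsets must be 4-byte aligned and stay within bounds.
 */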
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		const struct inet_diag_bc_op *op = bc;
		int min_len = sizeof(struct inet_diag_bc_op);

//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		if (op->code != INET_DIAG_BC_NOP) {
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc  += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct inet_diag_req_v2 *r,
			      const struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
			       struct sk_buff *skb,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	if (bc != NULL) {
		struct inet_diag_entry entry;

		entry.family = tw->tw_family;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);
			entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw6->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		entry.userlocks = 0;

		if (!inet_diag_bc_run(bc, &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r,
				   NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
 * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
 */
static inline void inet_diag_req_addrs(const struct sock *sk,
				       const struct request_sock *req,
				       struct inet_diag_entry *entry)
{
	struct inet_request_sock *ireq = inet_rsk(req);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		if (req->rsk_ops->family == AF_INET6) {
			entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
			entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
		} else if (req->rsk_ops->family == AF_INET) {
			ipv6_addr_set_v4mapped(ireq->loc_addr,
					       &entry->saddr_storage);
			ipv6_addr_set_v4mapped(ireq->rmt_addr,
					       &entry->daddr_storage);
			entry->saddr = entry->saddr_storage.s6_addr32;
			entry->daddr = entry->daddr_storage.s6_addr32;
		}
	} else
#endif
	{
		entry->saddr = &ireq->loc_addr;
		entry->daddr = &ireq->rmt_addr;
	}
}

static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = req->num_retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(req, r->id.idiag_cookie);

	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = ireq->rmt_port;

	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = ireq->loc_addr;
	r->id.idiag_dst[0] = ireq->rmt_addr;

	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		struct inet_diag_entry entry;
		inet_diag_req_addrs(sk, req, &entry);
		memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
		memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
	}
#endif

	return nlmsg_end(skb, nlh);
}

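/* Dump the pending connection requests (SYN_RECV) hanging off a listening
 * socket. cb->args[3] and cb->args[4] record the syn table bucket and the
 * request index, so an interrupted dump can resume where it left off.
 */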
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_diag_entry entry;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	if (bc != NULL) {
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				inet_diag_req_addrs(sk, req, &entry);
				entry.dport = ntohs(ireq->rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}

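/* Walk first the listening hash (while cb->args[0] == 0), then the
 * established and timewait chains, emitting one message per matching socket.
 * The current bucket and in-bucket position are kept in cb->args[1]/[2] so
 * the dump can be restarted when the skb fills up.
 */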
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
		struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	int i, num;
	int s_i, s_num;
	struct net *net = sock_net(skb->sk);

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_nulls_node *node;
			struct inet_listen_hashbucket *ilb;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct sock *sk;
		struct hlist_nulls_node *node;

		num = 0;

		if (hlist_nulls_empty(&head->chain) &&
		    hlist_nulls_empty(&head->twchain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		if (r->idiag_states & (TCPF_TIME_WAIT | TCPF_FIN_WAIT2)) {
			struct inet_timewait_sock *tw;

			inet_twsk_for_each(tw, node,
				    &head->twchain) {
				if (!net_eq(twsk_net(tw), net))
					continue;

				if (num < s_num)
					goto next_dying;
				if (!(r->idiag_states & (1 << tw->tw_substate)))
					goto next_dying;
				if (r->sdiag_family != AF_UNSPEC &&
				    tw->tw_family != r->sdiag_family)
					goto next_dying;
				if (r->id.idiag_sport != tw->tw_sport &&
				    r->id.idiag_sport)
					goto next_dying;
				if (r->id.idiag_dport != tw->tw_dport &&
				    r->id.idiag_dport)
					goto next_dying;
				if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
					spin_unlock_bh(lock);
					goto done;
				}
next_dying:
				++num;
			}
		}
		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	const struct inet_diag_handler *handler;
	int err = 0;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	else
		err = PTR_ERR(handler);
	inet_diag_unlock_handler(handler);

	return err ? : skb->len;
}

static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req_v2);

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}

static inline int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}

static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req);

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = nlmsg_data(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_get_exact(in_skb, nlh, &req);
}

static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;
			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
};

static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
};

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (inet_diag_table[type] == NULL) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);