net/ipv4/inet_diag.c
/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	const __be32 *saddr;
	const __be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
	u32 ifindex;
	u32 mark;
};

static DEFINE_MUTEX(inet_diag_table_mutex);

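/*
 * Return the diag handler registered for @proto, requesting its module
 * if none is registered yet.  The handler is returned with
 * inet_diag_table_mutex held; the mutex stays held even when
 * ERR_PTR(-ENOENT) is returned, so callers always pair this with
 * inet_diag_unlock_handler().
 */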
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

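/*
 * Fill the family, port, interface and cookie fields of an inet_diag_msg
 * from @sk.  IPv6 sockets report their full 128-bit addresses; IPv4
 * sockets zero the address arrays and store the address in word 0.
 */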
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
	r->idiag_family = sk->sk_family;

	r->id.idiag_sport = htons(sk->sk_num);
	r->id.idiag_dport = sk->sk_dport;
	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
	} else
#endif
	{
	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = sk->sk_rcv_saddr;
	r->id.idiag_dst[0] = sk->sk_daddr;
	}
}
EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);

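/*
 * Estimate the attribute space a single reply may need, so the reply
 * skb can be allocated large enough before it is filled.
 */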
static size_t inet_sk_attr_size(struct sock *sk,
				const struct inet_diag_req_v2 *req,
				bool net_admin)
{
	const struct inet_diag_handler *handler;
	size_t aux = 0;

	handler = inet_diag_table[req->sdiag_protocol];
	if (handler && handler->idiag_get_aux_size)
		aux = handler->idiag_get_aux_size(sk, net_admin);

	return	  nla_total_size(sizeof(struct tcp_info))
		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
		+ nla_total_size(1) /* INET_DIAG_TOS */
		+ nla_total_size(1) /* INET_DIAG_TCLASS */
		+ nla_total_size(4) /* INET_DIAG_MARK */
		+ nla_total_size(4) /* INET_DIAG_CLASS_ID */
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
		+ nla_total_size(TCP_CA_NAME_MAX)
		+ nla_total_size(sizeof(struct tcpvegas_info))
		+ aux
		+ 64;
}

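/*
 * Emit the generic socket attributes (shutdown state, TOS/TCLASS,
 * v6-only flag and, for CAP_NET_ADMIN callers, the mark) and fill in
 * uid and inode.  Returns 0 on success, 1 if the skb ran out of room.
 */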
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
			     struct inet_diag_msg *r, int ext,
			     struct user_namespace *user_ns,
			     bool net_admin)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;

		if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
			goto errout;
	}
#endif

	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
		goto errout;

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	return 0;
errout:
	return 1;
}
EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);

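/*
 * Build a full SOCK_DIAG_BY_FAMILY reply for one full socket: the common
 * header, the requested extensions (meminfo, congestion control info,
 * the handler's INET_DIAG_INFO blob, class id) and the timer state
 * derived from the connection socket, when one is passed in.
 */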
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh,
		      bool net_admin)
{
	const struct tcp_congestion_ops *ca_ops;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(!handler);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
		goto errout;

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	/*
	 * RAW sockets might have user-defined protocols assigned,
	 * so report the one supplied on socket creation.
	 */
	if (sk->sk_type == SOCK_RAW) {
		if (nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))
			goto errout;
	}

	if (!icsk) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires =
			jiffies_to_msecs(icsk->icsk_timeout - jiffies);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires =
			jiffies_to_msecs(icsk->icsk_timeout - jiffies);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires =
			jiffies_to_msecs(sk->sk_timer.expires - jiffies);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}

	if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 handler->idiag_info_size,
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if (ext & (1 << (INET_DIAG_CONG - 1))) {
		int err = 0;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops)
			err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
		rcu_read_unlock();
		if (err < 0)
			goto errout;
	}

	handler->idiag_get_info(sk, r, info);

	if (ext & (1 << (INET_DIAG_INFO - 1)) && handler->idiag_get_aux)
		if (handler->idiag_get_aux(sk, net_admin, skb) < 0)
			goto errout;

	if (sk->sk_state < TCP_TIME_WAIT) {
		union tcp_cc_info info;
		size_t sz = 0;
		int attr;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ext, &attr, &info);
		rcu_read_unlock();
		if (sz && nla_put(skb, attr, sz, &info) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
	    ext & (1 << (INET_DIAG_TCLASS - 1))) {
		u32 classid = 0;

#ifdef CONFIG_SOCK_CGROUP_DATA
		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
#endif
		/* Fallback to socket priority if class id isn't set.
		 * Classful qdiscs use it as direct reference to class.
		 * For cgroup2 classid is always zero.
		 */
		if (!classid)
			classid = sk->sk_priority;

		if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
			goto errout;
	}

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      const struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh,
			      bool net_admin)
{
	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
				 portid, seq, nlmsg_flags, unlh, net_admin);
}

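/*
 * Report a TIME_WAIT socket.  Most fields do not apply to a timewait
 * minisock and are reported as zero; the remaining timewait timeout is
 * exported in idiag_expires.
 */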
static int inet_twsk_diag_fill(struct sock *sk,
			       struct sk_buff *skb,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_timer.expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	inet_diag_msg_common_fill(r, sk);
	r->idiag_retrans = 0;

	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

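/*
 * Report a request socket (TCP_NEW_SYN_RECV) as a SYN_RECV entry,
 * exporting the SYN-ACK retransmit count and the remaining request timer.
 */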
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh, bool net_admin)
{
	struct request_sock *reqsk = inet_reqsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = reqsk->num_retrans;

	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));

	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
				     inet_rsk(reqsk)->ir_mark))
		return -EMSGSIZE;

	nlmsg_end(skb, nlh);
	return 0;
}

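/*
 * Dispatch to the right fill routine based on socket state: timewait
 * and request sockets have their own minimal reports, everything else
 * goes through inet_csk_diag_fill().
 */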
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			const struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh, bool net_admin)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(sk, skb, portid, seq,
					   nlmsg_flags, unlh);

	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return inet_req_diag_fill(sk, skb, portid, seq,
					  nlmsg_flags, unlh, net_admin);

	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
				  nlmsg_flags, unlh, net_admin);
}

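/*
 * Look up the single socket described by an exact-match request,
 * verify the request cookie and return the socket with a reference
 * held (or an ERR_PTR).  V4-mapped IPv6 addresses are looked up in the
 * IPv4 hash tables.  The caller drops the reference with sock_gen_put().
 */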
struct sock *inet_diag_find_one_icsk(struct net *net,
				     struct inet_hashinfo *hashinfo,
				     const struct inet_diag_req_v2 *req)
{
	struct sock *sk;

	rcu_read_lock();
	if (req->sdiag_family == AF_INET)
		sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3],
					 req->id.idiag_dport, req->id.idiag_src[3],
					 req->id.idiag_sport, req->id.idiag_if);
		else
			sk = inet6_lookup(net, hashinfo, NULL, 0,
					  (struct in6_addr *)req->id.idiag_dst,
					  req->id.idiag_dport,
					  (struct in6_addr *)req->id.idiag_src,
					  req->id.idiag_sport,
					  req->id.idiag_if);
	}
#endif
	else {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}
	rcu_read_unlock();
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_gen_put(sk);
		return ERR_PTR(-ENOENT);
	}

	return sk;
}
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);

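/*
 * Handle an exact-match request: find the socket, allocate a reply
 * sized by inet_sk_attr_size(), fill it and unicast it back to the
 * requesting netlink socket.
 */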
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
			    struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh,
			    const struct inet_diag_req_v2 *req)
{
	bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN);
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	sk = inet_diag_find_one_icsk(net, hashinfo, req);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	rep = nlmsg_new(inet_sk_attr_size(sk, req, net_admin), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh, net_admin);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);

	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       const struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else if (cmd == SOCK_DIAG_BY_FAMILY)
		err = handler->dump_one(in_skb, nlh, req);
	else if (cmd == SOCK_DESTROY && handler->destroy)
		err = handler->destroy(in_skb, req);
	else
		err = -EOPNOTSUPP;
	inet_diag_unlock_handler(handler);

	return err;
}

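/*
 * Compare the first @bits bits of two big-endian address arrays.
 * Returns 1 on a prefix match, 0 otherwise.
 */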
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

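/*
 * Interpret the filter bytecode against one socket's addresses, ports,
 * device and mark.  Every op jumps forward by op->yes or op->no; the
 * filter accepts the socket only if execution runs off the end exactly
 * (len reaches 0).
 */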
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			const struct inet_diag_hostcond *cond;
			const __be32 *addr;

			cond = (const struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		case INET_DIAG_BC_DEV_COND: {
			u32 ifindex;

			ifindex = *((const u32 *)(op + 1));
			if (ifindex != entry->ifindex)
				yes = 0;
			break;
		}
		case INET_DIAG_BC_MARK_COND: {
			struct inet_diag_markcond *cond;

			cond = (struct inet_diag_markcond *)(op + 1);
			if ((entry->mark & cond->mask) != cond->mark)
				yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
 */
static void entry_fill_addrs(struct inet_diag_entry *entry,
			     const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
		entry->daddr = sk->sk_v6_daddr.s6_addr32;
	} else
#endif
	{
		entry->saddr = &sk->sk_rcv_saddr;
		entry->daddr = &sk->sk_daddr;
	}
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;

	if (!bc)
		return 1;

	entry.family = sk->sk_family;
	entry_fill_addrs(&entry, sk);
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.ifindex = sk->sk_bound_dev_if;
	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
	if (sk_fullsock(sk))
		entry.mark = sk->sk_mark;
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
	else
		entry.mark = 0;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);

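/*
 * Check that a jump target, given as the number of bytes remaining
 * after it (@cc), coincides with the start of an instruction reachable
 * by walking the "yes" chain from the beginning of the program.
 */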
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc += op->yes;
	}
	return 0;
}

/* data is u32 ifindex */
static bool valid_devcond(const struct inet_diag_bc_op *op, int len,
			  int *min_len)
{
	/* Check ifindex space. */
	*min_len += sizeof(u32);
	if (len < *min_len)
		return false;

	return true;
}

/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
			   int *min_len)
{
	struct inet_diag_hostcond *cond;
	int addr_len;

	/* Check hostcond space. */
	*min_len += sizeof(struct inet_diag_hostcond);
	if (len < *min_len)
		return false;
	cond = (struct inet_diag_hostcond *)(op + 1);

	/* Check address family and address length. */
	switch (cond->family) {
	case AF_UNSPEC:
		addr_len = 0;
		break;
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
	default:
		return false;
	}
	*min_len += addr_len;
	if (len < *min_len)
		return false;

	/* Check prefix length (in bits) vs address length (in bytes). */
	if (cond->prefix_len > 8 * addr_len)
		return false;

	return true;
}

/* Validate a port comparison operator. */
static bool valid_port_comparison(const struct inet_diag_bc_op *op,
				  int len, int *min_len)
{
	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
	*min_len += sizeof(struct inet_diag_bc_op);
	if (len < *min_len)
		return false;
	return true;
}

static bool valid_markcond(const struct inet_diag_bc_op *op, int len,
			   int *min_len)
{
	*min_len += sizeof(struct inet_diag_markcond);
	return len >= *min_len;
}

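/*
 * Validate filter bytecode supplied from userspace before running it:
 * every op must be well-formed, jump targets must stay inside the
 * program and land on instruction boundaries, and
 * INET_DIAG_BC_MARK_COND is restricted to CAP_NET_ADMIN.
 */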
static int inet_diag_bc_audit(const struct nlattr *attr,
			      const struct sk_buff *skb)
{
	bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
	const void *bytecode, *bc;
	int bytecode_len, len;

	if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op))
		return -EINVAL;

	bytecode = bc = nla_data(attr);
	len = bytecode_len = nla_len(attr);

	while (len > 0) {
		int min_len = sizeof(struct inet_diag_bc_op);
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_DEV_COND:
			if (!valid_devcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_MARK_COND:
			if (!net_admin)
				return -EPERM;
			if (!valid_markcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		if (op->code != INET_DIAG_BC_NOP) {
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r,
			      const struct nlattr *bc,
			      bool net_admin)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh,
				  net_admin);
}

static void twsk_build_assert(void)
{
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
		     offsetof(struct sock, sk_family));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
		     offsetof(struct inet_sock, inet_num));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
		     offsetof(struct inet_sock, inet_dport));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
		     offsetof(struct inet_sock, inet_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
		     offsetof(struct inet_sock, inet_daddr));

#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
		     offsetof(struct sock, sk_v6_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
		     offsetof(struct sock, sk_v6_daddr));
#endif
}

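/*
 * Netlink dump worker shared by the TCP/DCCP diag modules: walk the
 * listening hash and then the established hash, applying the request
 * filter and the optional bytecode to every socket.  Established-hash
 * chains are collected in batches of SKARR_SZ sockets under the bucket
 * lock and reported after the lock is dropped; cb->args[] records the
 * resume position between dump calls.
 */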
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
			 struct netlink_callback *cb,
			 const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct net *net = sock_net(skb->sk);
	u32 idiag_states = r->idiag_states;
	int i, num, s_i, s_num;
	struct sock *sk;

	if (idiag_states & TCPF_SYN_RECV)
		idiag_states |= TCPF_NEW_SYN_RECV;
	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN) || r->id.idiag_dport)
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct inet_listen_hashbucket *ilb;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock(&ilb->lock);
			sk_for_each(sk, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (inet_csk_diag_dump(sk, skb, cb, r,
						       bc, net_admin) < 0) {
					spin_unlock(&ilb->lock);
					goto done;
				}

next_listen:
				++num;
			}
			spin_unlock(&ilb->lock);

			s_num = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(idiag_states & ~TCPF_LISTEN))
		goto out;

#define SKARR_SZ 16
	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct hlist_nulls_node *node;
		struct sock *sk_arr[SKARR_SZ];
		int num_arr[SKARR_SZ];
		int idx, accum, res;

		if (hlist_nulls_empty(&head->chain))
			continue;

		if (i > s_i)
			s_num = 0;

next_chunk:
		num = 0;
		accum = 0;
		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			int state;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			state = (sk->sk_state == TCP_TIME_WAIT) ?
				inet_twsk(sk)->tw_substate : sk->sk_state;
			if (!(idiag_states & (1 << state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != htons(sk->sk_num) &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != sk->sk_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			twsk_build_assert();

			if (!inet_diag_bc_sk(bc, sk))
				goto next_normal;

			if (!refcount_inc_not_zero(&sk->sk_refcnt))
				goto next_normal;

			num_arr[accum] = num;
			sk_arr[accum] = sk;
			if (++accum == SKARR_SZ)
				break;
next_normal:
			++num;
		}
		spin_unlock_bh(lock);
		res = 0;
		for (idx = 0; idx < accum; idx++) {
			if (res >= 0) {
				res = sk_diag_fill(sk_arr[idx], skb, r,
					   sk_user_ns(NETLINK_CB(cb->skb).sk),
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   cb->nlh, net_admin);
				if (res < 0)
					num = num_arr[idx];
			}
			sock_gen_put(sk_arr[idx]);
		}
		if (res < 0)
			break;
		cond_resched();
		if (accum == SKARR_SZ) {
			s_num = num + 1;
			goto next_chunk;
		}
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    const struct inet_diag_req_v2 *r,
			    struct nlattr *bc)
{
	const struct inet_diag_handler *handler;
	int err = 0;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	else
		err = PTR_ERR(handler);
	inet_diag_unlock_handler(handler);

	return err ? : skb->len;
}

static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct nlattr *bc = NULL;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}

static int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}

static int inet_diag_dump_compat(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
	int hdrlen = sizeof(struct inet_diag_req);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = nlmsg_data(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req);
}

static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;
			int err;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			err = inet_diag_bc_audit(attr, skb);
			if (err)
				return err;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
	    h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;
			int err;

			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			err = inet_diag_bc_audit(attr, skb);
			if (err)
				return err;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
}

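/*
 * sock_diag ->get_info() hook: build a single inet_diag_msg for @sk
 * without a userspace request to filter on, tagging it with the
 * socket's protocol and the handler's INET_DIAG_INFO payload.
 */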
static
int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
{
	const struct inet_diag_handler *handler;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	struct inet_diag_msg *r;
	void *info = NULL;
	int err = 0;

	nlh = nlmsg_put(skb, 0, 0, SOCK_DIAG_BY_FAMILY, sizeof(*r), 0);
	if (!nlh)
		return -ENOMEM;

	r = nlmsg_data(nlh);
	memset(r, 0, sizeof(*r));
	inet_diag_msg_common_fill(r, sk);
	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_STREAM)
		r->id.idiag_sport = inet_sk(sk)->inet_sport;
	r->idiag_state = sk->sk_state;

	if ((err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	handler = inet_diag_lock_handler(sk->sk_protocol);
	if (IS_ERR(handler)) {
		inet_diag_unlock_handler(handler);
		nlmsg_cancel(skb, nlh);
		return PTR_ERR(handler);
	}

	attr = handler->idiag_info_size
		? nla_reserve_64bit(skb, INET_DIAG_INFO,
				    handler->idiag_info_size,
				    INET_DIAG_PAD)
		: NULL;
	if (attr)
		info = nla_data(attr);

	handler->idiag_get_info(sk, r, info);
	inet_diag_unlock_handler(handler);

	nlmsg_end(skb, nlh);
	return 0;
}

static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_cmd,
	.get_info = inet_diag_handler_get_info,
	.destroy = inet_diag_handler_cmd,
};

static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_cmd,
	.get_info = inet_diag_handler_get_info,
	.destroy = inet_diag_handler_cmd,
};

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (!inet_diag_table[type]) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);