[GitHub/mt8127/android_kernel_alcatel_ttab.git] net/xfrm/xfrm_user.c
1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/pfkeyv2.h>
23 #include <linux/ipsec.h>
24 #include <linux/init.h>
25 #include <linux/security.h>
26 #include <net/sock.h>
27 #include <net/xfrm.h>
28 #include <net/netlink.h>
29 #include <net/ah.h>
30 #include <asm/uaccess.h>
31 #if IS_ENABLED(CONFIG_IPV6)
32 #include <linux/in6.h>
33 #endif
34
35 static inline int aead_len(struct xfrm_algo_aead *alg)
36 {
37 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
38 }
39
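/* Reject an XFRMA_ALG_AUTH/CRYPT/COMP attribute whose payload is shorter than
 * the key length it claims, and force alg_name to be NUL terminated. */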
40 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
41 {
42 struct nlattr *rt = attrs[type];
43 struct xfrm_algo *algp;
44
45 if (!rt)
46 return 0;
47
48 algp = nla_data(rt);
49 if (nla_len(rt) < xfrm_alg_len(algp))
50 return -EINVAL;
51
52 switch (type) {
53 case XFRMA_ALG_AUTH:
54 case XFRMA_ALG_CRYPT:
55 case XFRMA_ALG_COMP:
56 break;
57
58 default:
59 return -EINVAL;
60 }
61
62 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
63 return 0;
64 }
65
66 static int verify_auth_trunc(struct nlattr **attrs)
67 {
68 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
69 struct xfrm_algo_auth *algp;
70
71 if (!rt)
72 return 0;
73
74 algp = nla_data(rt);
75 if (nla_len(rt) < xfrm_alg_auth_len(algp))
76 return -EINVAL;
77
78 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
79 return 0;
80 }
81
82 static int verify_aead(struct nlattr **attrs)
83 {
84 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
85 struct xfrm_algo_aead *algp;
86
87 if (!rt)
88 return 0;
89
90 algp = nla_data(rt);
91 if (nla_len(rt) < aead_len(algp))
92 return -EINVAL;
93
94 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
95 return 0;
96 }
97
98 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
99 xfrm_address_t **addrp)
100 {
101 struct nlattr *rt = attrs[type];
102
103 if (rt && addrp)
104 *addrp = nla_data(rt);
105 }
106
107 static inline int verify_sec_ctx_len(struct nlattr **attrs)
108 {
109 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
110 struct xfrm_user_sec_ctx *uctx;
111
112 if (!rt)
113 return 0;
114
115 uctx = nla_data(rt);
116 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
117 return -EINVAL;
118
119 return 0;
120 }
121
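/* An ESN state must carry a sane XFRMA_REPLAY_ESN_VAL attribute; if that
 * attribute is present at all, the SA must be ESP and must not also use the
 * legacy replay_window. */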
122 static inline int verify_replay(struct xfrm_usersa_info *p,
123 struct nlattr **attrs)
124 {
125 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
126 struct xfrm_replay_state_esn *rs;
127
128 if (p->flags & XFRM_STATE_ESN) {
129 if (!rt)
130 return -EINVAL;
131
132 rs = nla_data(rt);
133
134 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
135 return -EINVAL;
136
137 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
138 nla_len(rt) != sizeof(*rs))
139 return -EINVAL;
140 }
141
142 if (!rt)
143 return 0;
144
145 if (p->id.proto != IPPROTO_ESP)
146 return -EINVAL;
147
148 if (p->replay_window != 0)
149 return -EINVAL;
150
151 return 0;
152 }
153
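/* Sanity-check an incoming SA request: address family, the attribute
 * combination allowed for the chosen protocol, each algorithm / security
 * context / replay attribute, and finally the encapsulation mode. */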
154 static int verify_newsa_info(struct xfrm_usersa_info *p,
155 struct nlattr **attrs)
156 {
157 int err;
158
159 err = -EINVAL;
160 switch (p->family) {
161 case AF_INET:
162 break;
163
164 case AF_INET6:
165 #if IS_ENABLED(CONFIG_IPV6)
166 break;
167 #else
168 err = -EAFNOSUPPORT;
169 goto out;
170 #endif
171
172 default:
173 goto out;
174 }
175
176 err = -EINVAL;
177 switch (p->id.proto) {
178 case IPPROTO_AH:
179 if ((!attrs[XFRMA_ALG_AUTH] &&
180 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
181 attrs[XFRMA_ALG_AEAD] ||
182 attrs[XFRMA_ALG_CRYPT] ||
183 attrs[XFRMA_ALG_COMP] ||
184 attrs[XFRMA_TFCPAD])
185 goto out;
186 break;
187
188 case IPPROTO_ESP:
189 if (attrs[XFRMA_ALG_COMP])
190 goto out;
191 if (!attrs[XFRMA_ALG_AUTH] &&
192 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
193 !attrs[XFRMA_ALG_CRYPT] &&
194 !attrs[XFRMA_ALG_AEAD])
195 goto out;
196 if ((attrs[XFRMA_ALG_AUTH] ||
197 attrs[XFRMA_ALG_AUTH_TRUNC] ||
198 attrs[XFRMA_ALG_CRYPT]) &&
199 attrs[XFRMA_ALG_AEAD])
200 goto out;
201 if (attrs[XFRMA_TFCPAD] &&
202 p->mode != XFRM_MODE_TUNNEL)
203 goto out;
204 break;
205
206 case IPPROTO_COMP:
207 if (!attrs[XFRMA_ALG_COMP] ||
208 attrs[XFRMA_ALG_AEAD] ||
209 attrs[XFRMA_ALG_AUTH] ||
210 attrs[XFRMA_ALG_AUTH_TRUNC] ||
211 attrs[XFRMA_ALG_CRYPT] ||
212 attrs[XFRMA_TFCPAD])
213 goto out;
214 break;
215
216 #if IS_ENABLED(CONFIG_IPV6)
217 case IPPROTO_DSTOPTS:
218 case IPPROTO_ROUTING:
219 if (attrs[XFRMA_ALG_COMP] ||
220 attrs[XFRMA_ALG_AUTH] ||
221 attrs[XFRMA_ALG_AUTH_TRUNC] ||
222 attrs[XFRMA_ALG_AEAD] ||
223 attrs[XFRMA_ALG_CRYPT] ||
224 attrs[XFRMA_ENCAP] ||
225 attrs[XFRMA_SEC_CTX] ||
226 attrs[XFRMA_TFCPAD] ||
227 !attrs[XFRMA_COADDR])
228 goto out;
229 break;
230 #endif
231
232 default:
233 goto out;
234 }
235
236 if ((err = verify_aead(attrs)))
237 goto out;
238 if ((err = verify_auth_trunc(attrs)))
239 goto out;
240 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
241 goto out;
242 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
243 goto out;
244 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
245 goto out;
246 if ((err = verify_sec_ctx_len(attrs)))
247 goto out;
248 if ((err = verify_replay(p, attrs)))
249 goto out;
250
251 err = -EINVAL;
252 switch (p->mode) {
253 case XFRM_MODE_TRANSPORT:
254 case XFRM_MODE_TUNNEL:
255 case XFRM_MODE_ROUTEOPTIMIZATION:
256 case XFRM_MODE_BEET:
257 break;
258
259 default:
260 goto out;
261 }
262
263 err = 0;
264
265 out:
266 return err;
267 }
268
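/* Resolve a generic algorithm attribute by name, record its pfkey algorithm
 * id in *props and keep a kernel copy of the user-supplied struct. */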
269 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
270 struct xfrm_algo_desc *(*get_byname)(const char *, int),
271 struct nlattr *rta)
272 {
273 struct xfrm_algo *p, *ualg;
274 struct xfrm_algo_desc *algo;
275
276 if (!rta)
277 return 0;
278
279 ualg = nla_data(rta);
280
281 algo = get_byname(ualg->alg_name, 1);
282 if (!algo)
283 return -ENOSYS;
284 *props = algo->desc.sadb_alg_id;
285
286 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
287 if (!p)
288 return -ENOMEM;
289
290 strcpy(p->alg_name, algo->name);
291 *algpp = p;
292 return 0;
293 }
294
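/* Legacy XFRMA_ALG_AUTH path: convert struct xfrm_algo into xfrm_algo_auth,
 * defaulting the ICV length to the algorithm's standard truncation. */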
295 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
296 struct nlattr *rta)
297 {
298 struct xfrm_algo *ualg;
299 struct xfrm_algo_auth *p;
300 struct xfrm_algo_desc *algo;
301
302 if (!rta)
303 return 0;
304
305 ualg = nla_data(rta);
306
307 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
308 if (!algo)
309 return -ENOSYS;
310 *props = algo->desc.sadb_alg_id;
311
312 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
313 if (!p)
314 return -ENOMEM;
315
316 strcpy(p->alg_name, algo->name);
317 p->alg_key_len = ualg->alg_key_len;
318 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
319 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
320
321 *algpp = p;
322 return 0;
323 }
324
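/* XFRMA_ALG_AUTH_TRUNC path: bound the caller's ICV truncation length by
 * MAX_AH_AUTH_LEN and the algorithm's full ICV size, falling back to the
 * default truncation when none was given. */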
325 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
326 struct nlattr *rta)
327 {
328 struct xfrm_algo_auth *p, *ualg;
329 struct xfrm_algo_desc *algo;
330
331 if (!rta)
332 return 0;
333
334 ualg = nla_data(rta);
335
336 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
337 if (!algo)
338 return -ENOSYS;
339 if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
340 ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
341 return -EINVAL;
342 *props = algo->desc.sadb_alg_id;
343
344 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
345 if (!p)
346 return -ENOMEM;
347
348 strcpy(p->alg_name, algo->name);
349 if (!p->alg_trunc_len)
350 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
351
352 *algpp = p;
353 return 0;
354 }
355
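/* Resolve an AEAD algorithm by name and ICV length and keep a kernel copy of
 * the user-supplied struct. */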
356 static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
357 struct nlattr *rta)
358 {
359 struct xfrm_algo_aead *p, *ualg;
360 struct xfrm_algo_desc *algo;
361
362 if (!rta)
363 return 0;
364
365 ualg = nla_data(rta);
366
367 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
368 if (!algo)
369 return -ENOSYS;
370 *props = algo->desc.sadb_alg_id;
371
372 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
373 if (!p)
374 return -ENOMEM;
375
376 strcpy(p->alg_name, algo->name);
377 *algpp = p;
378 return 0;
379 }
380
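/* When updating AE parameters, the user-supplied ESN state must match the
 * length and bitmap size of the one already attached to the SA, and its
 * replay_window must fit inside that bitmap. */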
381 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
382 struct nlattr *rp)
383 {
384 struct xfrm_replay_state_esn *up;
385 int ulen;
386
387 if (!replay_esn || !rp)
388 return 0;
389
390 up = nla_data(rp);
391 ulen = xfrm_replay_state_esn_len(up);
392
393 /* Check the overall length and the internal bitmap length to avoid
394 * potential overflow. */
395 if (nla_len(rp) < ulen ||
396 xfrm_replay_state_esn_len(replay_esn) != ulen ||
397 replay_esn->bmp_len != up->bmp_len)
398 return -EINVAL;
399
400 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
401 return -EINVAL;
402
403 return 0;
404 }
405
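/* Allocate the SA's replay_esn/preplay_esn pair, sized from the user's
 * bmp_len, copying either the full ESN state or just its header depending on
 * how much data the attribute supplied. */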
406 static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
407 struct xfrm_replay_state_esn **preplay_esn,
408 struct nlattr *rta)
409 {
410 struct xfrm_replay_state_esn *p, *pp, *up;
411 int klen, ulen;
412
413 if (!rta)
414 return 0;
415
416 up = nla_data(rta);
417 klen = xfrm_replay_state_esn_len(up);
418 ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
419
420 p = kzalloc(klen, GFP_KERNEL);
421 if (!p)
422 return -ENOMEM;
423
424 pp = kzalloc(klen, GFP_KERNEL);
425 if (!pp) {
426 kfree(p);
427 return -ENOMEM;
428 }
429
430 memcpy(p, up, ulen);
431 memcpy(pp, up, ulen);
432
433 *replay_esn = p;
434 *preplay_esn = pp;
435
436 return 0;
437 }
438
439 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
440 {
441 int len = 0;
442
443 if (xfrm_ctx) {
444 len += sizeof(struct xfrm_user_sec_ctx);
445 len += xfrm_ctx->ctx_len;
446 }
447 return len;
448 }
449
450 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
451 {
452 memcpy(&x->id, &p->id, sizeof(x->id));
453 memcpy(&x->sel, &p->sel, sizeof(x->sel));
454 memcpy(&x->lft, &p->lft, sizeof(x->lft));
455 x->props.mode = p->mode;
456 x->props.replay_window = p->replay_window;
457 x->props.reqid = p->reqid;
458 x->props.family = p->family;
459 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
460 x->props.flags = p->flags;
461
462 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
463 x->sel.family = p->family;
464 }
465
466 /*
467 * someday when pfkey also has support, we could have the code
468 * somehow made shareable and move it to xfrm_state.c - JHS
469 *
470 */
471 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
472 int update_esn)
473 {
474 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
475 struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
476 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
477 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
478 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
479
480 if (re) {
481 struct xfrm_replay_state_esn *replay_esn;
482 replay_esn = nla_data(re);
483 memcpy(x->replay_esn, replay_esn,
484 xfrm_replay_state_esn_len(replay_esn));
485 memcpy(x->preplay_esn, replay_esn,
486 xfrm_replay_state_esn_len(replay_esn));
487 }
488
489 if (rp) {
490 struct xfrm_replay_state *replay;
491 replay = nla_data(rp);
492 memcpy(&x->replay, replay, sizeof(*replay));
493 memcpy(&x->preplay, replay, sizeof(*replay));
494 }
495
496 if (lt) {
497 struct xfrm_lifetime_cur *ltime;
498 ltime = nla_data(lt);
499 x->curlft.bytes = ltime->bytes;
500 x->curlft.packets = ltime->packets;
501 x->curlft.add_time = ltime->add_time;
502 x->curlft.use_time = ltime->use_time;
503 }
504
505 if (et)
506 x->replay_maxage = nla_get_u32(et);
507
508 if (rt)
509 x->replay_maxdiff = nla_get_u32(rt);
510 }
511
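/* Build a fully initialized xfrm_state from an XFRM_MSG_NEWSA/UPDSA request:
 * copy the base usersa_info, attach every optional attribute (algorithms,
 * NAT-T encap, TFC pad, care-of address, mark, security context, ESN replay
 * state), then initialize the state and apply AE parameter overrides. */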
512 static struct xfrm_state *xfrm_state_construct(struct net *net,
513 struct xfrm_usersa_info *p,
514 struct nlattr **attrs,
515 int *errp)
516 {
517 struct xfrm_state *x = xfrm_state_alloc(net);
518 int err = -ENOMEM;
519
520 if (!x)
521 goto error_no_put;
522
523 copy_from_user_state(x, p);
524
525 if (attrs[XFRMA_SA_EXTRA_FLAGS])
526 x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
527
528 if ((err = attach_aead(&x->aead, &x->props.ealgo,
529 attrs[XFRMA_ALG_AEAD])))
530 goto error;
531 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
532 attrs[XFRMA_ALG_AUTH_TRUNC])))
533 goto error;
534 if (!x->props.aalgo) {
535 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
536 attrs[XFRMA_ALG_AUTH])))
537 goto error;
538 }
539 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
540 xfrm_ealg_get_byname,
541 attrs[XFRMA_ALG_CRYPT])))
542 goto error;
543 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
544 xfrm_calg_get_byname,
545 attrs[XFRMA_ALG_COMP])))
546 goto error;
547
548 if (attrs[XFRMA_ENCAP]) {
549 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
550 sizeof(*x->encap), GFP_KERNEL);
551 if (x->encap == NULL)
552 goto error;
553 }
554
555 if (attrs[XFRMA_TFCPAD])
556 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
557
558 if (attrs[XFRMA_COADDR]) {
559 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
560 sizeof(*x->coaddr), GFP_KERNEL);
561 if (x->coaddr == NULL)
562 goto error;
563 }
564
565 xfrm_mark_get(attrs, &x->mark);
566
567 err = __xfrm_init_state(x, false);
568 if (err)
569 goto error;
570
571 if (attrs[XFRMA_SEC_CTX] &&
572 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
573 goto error;
574
575 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
576 attrs[XFRMA_REPLAY_ESN_VAL])))
577 goto error;
578
579 x->km.seq = p->seq;
580 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
581 /* sysctl_xfrm_aevent_etime is in 100ms units */
582 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
583
584 if ((err = xfrm_init_replay(x)))
585 goto error;
586
587 /* override default values from above */
588 xfrm_update_ae_params(x, attrs, 0);
589
590 return x;
591
592 error:
593 x->km.state = XFRM_STATE_DEAD;
594 xfrm_state_put(x);
595 error_no_put:
596 *errp = err;
597 return NULL;
598 }
599
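/* Netlink handler for XFRM_MSG_NEWSA/XFRM_MSG_UPDSA: construct the state,
 * insert or update it, write the audit record and notify key managers. */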
600 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
601 struct nlattr **attrs)
602 {
603 struct net *net = sock_net(skb->sk);
604 struct xfrm_usersa_info *p = nlmsg_data(nlh);
605 struct xfrm_state *x;
606 int err;
607 struct km_event c;
608 kuid_t loginuid = audit_get_loginuid(current);
609 u32 sessionid = audit_get_sessionid(current);
610 u32 sid;
611
612 err = verify_newsa_info(p, attrs);
613 if (err)
614 return err;
615
616 x = xfrm_state_construct(net, p, attrs, &err);
617 if (!x)
618 return err;
619
620 xfrm_state_hold(x);
621 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
622 err = xfrm_state_add(x);
623 else
624 err = xfrm_state_update(x);
625
626 security_task_getsecid(current, &sid);
627 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
628
629 if (err < 0) {
630 x->km.state = XFRM_STATE_DEAD;
631 __xfrm_state_put(x);
632 goto out;
633 }
634
635 c.seq = nlh->nlmsg_seq;
636 c.portid = nlh->nlmsg_pid;
637 c.event = nlh->nlmsg_type;
638
639 km_state_notify(x, &c);
640 out:
641 xfrm_state_put(x);
642 return err;
643 }
644
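/* Find the SA named by an xfrm_usersa_id: by (daddr, spi, proto) when the
 * protocol uses SPIs, otherwise by source/destination address pair. */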
645 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
646 struct xfrm_usersa_id *p,
647 struct nlattr **attrs,
648 int *errp)
649 {
650 struct xfrm_state *x = NULL;
651 struct xfrm_mark m;
652 int err;
653 u32 mark = xfrm_mark_get(attrs, &m);
654
655 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
656 err = -ESRCH;
657 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
658 } else {
659 xfrm_address_t *saddr = NULL;
660
661 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
662 if (!saddr) {
663 err = -EINVAL;
664 goto out;
665 }
666
667 err = -ESRCH;
668 x = xfrm_state_lookup_byaddr(net, mark,
669 &p->daddr, saddr,
670 p->proto, p->family);
671 }
672
673 out:
674 if (!x && errp)
675 *errp = err;
676 return x;
677 }
678
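/* Netlink handler for XFRM_MSG_DELSA: look the SA up, check permissions,
 * delete it, audit the result and notify key managers. */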
679 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
680 struct nlattr **attrs)
681 {
682 struct net *net = sock_net(skb->sk);
683 struct xfrm_state *x;
684 int err = -ESRCH;
685 struct km_event c;
686 struct xfrm_usersa_id *p = nlmsg_data(nlh);
687 kuid_t loginuid = audit_get_loginuid(current);
688 u32 sessionid = audit_get_sessionid(current);
689 u32 sid;
690
691 x = xfrm_user_state_lookup(net, p, attrs, &err);
692 if (x == NULL)
693 return err;
694
695 if ((err = security_xfrm_state_delete(x)) != 0)
696 goto out;
697
698 if (xfrm_state_kern(x)) {
699 err = -EPERM;
700 goto out;
701 }
702
703 err = xfrm_state_delete(x);
704
705 if (err < 0)
706 goto out;
707
708 c.seq = nlh->nlmsg_seq;
709 c.portid = nlh->nlmsg_pid;
710 c.event = nlh->nlmsg_type;
711 km_state_notify(x, &c);
712
713 out:
714 security_task_getsecid(current, &sid);
715 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
716 xfrm_state_put(x);
717 return err;
718 }
719
720 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
721 {
722 memset(p, 0, sizeof(*p));
723 memcpy(&p->id, &x->id, sizeof(p->id));
724 memcpy(&p->sel, &x->sel, sizeof(p->sel));
725 memcpy(&p->lft, &x->lft, sizeof(p->lft));
726 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
727 memcpy(&p->stats, &x->stats, sizeof(p->stats));
728 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
729 p->mode = x->props.mode;
730 p->replay_window = x->props.replay_window;
731 p->reqid = x->props.reqid;
732 p->family = x->props.family;
733 p->flags = x->props.flags;
734 p->seq = x->km.seq;
735 }
736
737 struct xfrm_dump_info {
738 struct sk_buff *in_skb;
739 struct sk_buff *out_skb;
740 u32 nlmsg_seq;
741 u16 nlmsg_flags;
742 };
743
744 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
745 {
746 struct xfrm_user_sec_ctx *uctx;
747 struct nlattr *attr;
748 int ctx_size = sizeof(*uctx) + s->ctx_len;
749
750 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
751 if (attr == NULL)
752 return -EMSGSIZE;
753
754 uctx = nla_data(attr);
755 uctx->exttype = XFRMA_SEC_CTX;
756 uctx->len = ctx_size;
757 uctx->ctx_doi = s->ctx_doi;
758 uctx->ctx_alg = s->ctx_alg;
759 uctx->ctx_len = s->ctx_len;
760 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
761
762 return 0;
763 }
764
765 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
766 {
767 struct xfrm_algo *algo;
768 struct nlattr *nla;
769
770 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
771 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
772 if (!nla)
773 return -EMSGSIZE;
774
775 algo = nla_data(nla);
776 strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
777 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
778 algo->alg_key_len = auth->alg_key_len;
779
780 return 0;
781 }
782
783 /* Don't change this without updating xfrm_sa_len! */
784 static int copy_to_user_state_extra(struct xfrm_state *x,
785 struct xfrm_usersa_info *p,
786 struct sk_buff *skb)
787 {
788 int ret = 0;
789
790 copy_to_user_state(x, p);
791
792 if (x->props.extra_flags) {
793 ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
794 x->props.extra_flags);
795 if (ret)
796 goto out;
797 }
798
799 if (x->coaddr) {
800 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
801 if (ret)
802 goto out;
803 }
804 if (x->lastused) {
805 ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
806 if (ret)
807 goto out;
808 }
809 if (x->aead) {
810 ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
811 if (ret)
812 goto out;
813 }
814 if (x->aalg) {
815 ret = copy_to_user_auth(x->aalg, skb);
816 if (!ret)
817 ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
818 xfrm_alg_auth_len(x->aalg), x->aalg);
819 if (ret)
820 goto out;
821 }
822 if (x->ealg) {
823 ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
824 if (ret)
825 goto out;
826 }
827 if (x->calg) {
828 ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
829 if (ret)
830 goto out;
831 }
832 if (x->encap) {
833 ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
834 if (ret)
835 goto out;
836 }
837 if (x->tfcpad) {
838 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
839 if (ret)
840 goto out;
841 }
842 ret = xfrm_mark_put(skb, &x->mark);
843 if (ret)
844 goto out;
845 if (x->replay_esn) {
846 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
847 xfrm_replay_state_esn_len(x->replay_esn),
848 x->replay_esn);
849 if (ret)
850 goto out;
851 }
852 if (x->security)
853 ret = copy_sec_ctx(x->security, skb);
854 out:
855 return ret;
856 }
857
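/* Walker callback for SA dumps: emit one XFRM_MSG_NEWSA message per state. */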
858 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
859 {
860 struct xfrm_dump_info *sp = ptr;
861 struct sk_buff *in_skb = sp->in_skb;
862 struct sk_buff *skb = sp->out_skb;
863 struct xfrm_usersa_info *p;
864 struct nlmsghdr *nlh;
865 int err;
866
867 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
868 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
869 if (nlh == NULL)
870 return -EMSGSIZE;
871
872 p = nlmsg_data(nlh);
873
874 err = copy_to_user_state_extra(x, p, skb);
875 if (err) {
876 nlmsg_cancel(skb, nlh);
877 return err;
878 }
879 nlmsg_end(skb, nlh);
880 return 0;
881 }
882
883 static int xfrm_dump_sa_done(struct netlink_callback *cb)
884 {
885 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
886 xfrm_state_walk_done(walk);
887 return 0;
888 }
889
890 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
891 {
892 struct net *net = sock_net(skb->sk);
893 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
894 struct xfrm_dump_info info;
895
896 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
897 sizeof(cb->args) - sizeof(cb->args[0]));
898
899 info.in_skb = cb->skb;
900 info.out_skb = skb;
901 info.nlmsg_seq = cb->nlh->nlmsg_seq;
902 info.nlmsg_flags = NLM_F_MULTI;
903
904 if (!cb->args[0]) {
905 cb->args[0] = 1;
906 xfrm_state_walk_init(walk, 0);
907 }
908
909 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
910
911 return skb->len;
912 }
913
914 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
915 struct xfrm_state *x, u32 seq)
916 {
917 struct xfrm_dump_info info;
918 struct sk_buff *skb;
919 int err;
920
921 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
922 if (!skb)
923 return ERR_PTR(-ENOMEM);
924
925 info.in_skb = in_skb;
926 info.out_skb = skb;
927 info.nlmsg_seq = seq;
928 info.nlmsg_flags = 0;
929
930 err = dump_one_state(x, 0, &info);
931 if (err) {
932 kfree_skb(skb);
933 return ERR_PTR(err);
934 }
935
936 return skb;
937 }
938
939 static inline size_t xfrm_spdinfo_msgsize(void)
940 {
941 return NLMSG_ALIGN(4)
942 + nla_total_size(sizeof(struct xfrmu_spdinfo))
943 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
944 }
945
946 static int build_spdinfo(struct sk_buff *skb, struct net *net,
947 u32 portid, u32 seq, u32 flags)
948 {
949 struct xfrmk_spdinfo si;
950 struct xfrmu_spdinfo spc;
951 struct xfrmu_spdhinfo sph;
952 struct nlmsghdr *nlh;
953 int err;
954 u32 *f;
955
956 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
957 if (nlh == NULL) /* shouldn't really happen ... */
958 return -EMSGSIZE;
959
960 f = nlmsg_data(nlh);
961 *f = flags;
962 xfrm_spd_getinfo(net, &si);
963 spc.incnt = si.incnt;
964 spc.outcnt = si.outcnt;
965 spc.fwdcnt = si.fwdcnt;
966 spc.inscnt = si.inscnt;
967 spc.outscnt = si.outscnt;
968 spc.fwdscnt = si.fwdscnt;
969 sph.spdhcnt = si.spdhcnt;
970 sph.spdhmcnt = si.spdhmcnt;
971
972 err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
973 if (!err)
974 err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
975 if (err) {
976 nlmsg_cancel(skb, nlh);
977 return err;
978 }
979
980 return nlmsg_end(skb, nlh);
981 }
982
983 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
984 struct nlattr **attrs)
985 {
986 struct net *net = sock_net(skb->sk);
987 struct sk_buff *r_skb;
988 u32 *flags = nlmsg_data(nlh);
989 u32 sportid = NETLINK_CB(skb).portid;
990 u32 seq = nlh->nlmsg_seq;
991
992 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
993 if (r_skb == NULL)
994 return -ENOMEM;
995
996 if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
997 BUG();
998
999 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1000 }
1001
1002 static inline size_t xfrm_sadinfo_msgsize(void)
1003 {
1004 return NLMSG_ALIGN(4)
1005 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
1006 + nla_total_size(4); /* XFRMA_SAD_CNT */
1007 }
1008
1009 static int build_sadinfo(struct sk_buff *skb, struct net *net,
1010 u32 portid, u32 seq, u32 flags)
1011 {
1012 struct xfrmk_sadinfo si;
1013 struct xfrmu_sadhinfo sh;
1014 struct nlmsghdr *nlh;
1015 int err;
1016 u32 *f;
1017
1018 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
1019 if (nlh == NULL) /* shouldn't really happen ... */
1020 return -EMSGSIZE;
1021
1022 f = nlmsg_data(nlh);
1023 *f = flags;
1024 xfrm_sad_getinfo(net, &si);
1025
1026 sh.sadhmcnt = si.sadhmcnt;
1027 sh.sadhcnt = si.sadhcnt;
1028
1029 err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
1030 if (!err)
1031 err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
1032 if (err) {
1033 nlmsg_cancel(skb, nlh);
1034 return err;
1035 }
1036
1037 return nlmsg_end(skb, nlh);
1038 }
1039
1040 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1041 struct nlattr **attrs)
1042 {
1043 struct net *net = sock_net(skb->sk);
1044 struct sk_buff *r_skb;
1045 u32 *flags = nlmsg_data(nlh);
1046 u32 sportid = NETLINK_CB(skb).portid;
1047 u32 seq = nlh->nlmsg_seq;
1048
1049 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1050 if (r_skb == NULL)
1051 return -ENOMEM;
1052
1053 if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
1054 BUG();
1055
1056 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1057 }
1058
1059 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1060 struct nlattr **attrs)
1061 {
1062 struct net *net = sock_net(skb->sk);
1063 struct xfrm_usersa_id *p = nlmsg_data(nlh);
1064 struct xfrm_state *x;
1065 struct sk_buff *resp_skb;
1066 int err = -ESRCH;
1067
1068 x = xfrm_user_state_lookup(net, p, attrs, &err);
1069 if (x == NULL)
1070 goto out_noput;
1071
1072 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1073 if (IS_ERR(resp_skb)) {
1074 err = PTR_ERR(resp_skb);
1075 } else {
1076 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1077 }
1078 xfrm_state_put(x);
1079 out_noput:
1080 return err;
1081 }
1082
1083 static int verify_userspi_info(struct xfrm_userspi_info *p)
1084 {
1085 switch (p->info.id.proto) {
1086 case IPPROTO_AH:
1087 case IPPROTO_ESP:
1088 break;
1089
1090 case IPPROTO_COMP:
1091 /* IPCOMP spi is 16-bits. */
1092 if (p->max >= 0x10000)
1093 return -EINVAL;
1094 break;
1095
1096 default:
1097 return -EINVAL;
1098 }
1099
1100 if (p->min > p->max)
1101 return -EINVAL;
1102
1103 return 0;
1104 }
1105
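/* Netlink handler for XFRM_MSG_ALLOCSPI: find (or create) the matching larval
 * state, allocate it an SPI in [min, max] and unicast the result back. */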
1106 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1107 struct nlattr **attrs)
1108 {
1109 struct net *net = sock_net(skb->sk);
1110 struct xfrm_state *x;
1111 struct xfrm_userspi_info *p;
1112 struct sk_buff *resp_skb;
1113 xfrm_address_t *daddr;
1114 int family;
1115 int err;
1116 u32 mark;
1117 struct xfrm_mark m;
1118
1119 p = nlmsg_data(nlh);
1120 err = verify_userspi_info(p);
1121 if (err)
1122 goto out_noput;
1123
1124 family = p->info.family;
1125 daddr = &p->info.id.daddr;
1126
1127 x = NULL;
1128
1129 mark = xfrm_mark_get(attrs, &m);
1130 if (p->info.seq) {
1131 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
1132 if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
1133 xfrm_state_put(x);
1134 x = NULL;
1135 }
1136 }
1137
1138 if (!x)
1139 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
1140 p->info.id.proto, daddr,
1141 &p->info.saddr, 1,
1142 family);
1143 err = -ENOENT;
1144 if (x == NULL)
1145 goto out_noput;
1146
1147 err = xfrm_alloc_spi(x, p->min, p->max);
1148 if (err)
1149 goto out;
1150
1151 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1152 if (IS_ERR(resp_skb)) {
1153 err = PTR_ERR(resp_skb);
1154 goto out;
1155 }
1156
1157 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1158
1159 out:
1160 xfrm_state_put(x);
1161 out_noput:
1162 return err;
1163 }
1164
1165 static int verify_policy_dir(u8 dir)
1166 {
1167 switch (dir) {
1168 case XFRM_POLICY_IN:
1169 case XFRM_POLICY_OUT:
1170 case XFRM_POLICY_FWD:
1171 break;
1172
1173 default:
1174 return -EINVAL;
1175 }
1176
1177 return 0;
1178 }
1179
1180 static int verify_policy_type(u8 type)
1181 {
1182 switch (type) {
1183 case XFRM_POLICY_TYPE_MAIN:
1184 #ifdef CONFIG_XFRM_SUB_POLICY
1185 case XFRM_POLICY_TYPE_SUB:
1186 #endif
1187 break;
1188
1189 default:
1190 return -EINVAL;
1191 }
1192
1193 return 0;
1194 }
1195
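/* Validate the fixed part of a userspace policy: share mode, action,
 * selector family and policy direction. */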
1196 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1197 {
1198 switch (p->share) {
1199 case XFRM_SHARE_ANY:
1200 case XFRM_SHARE_SESSION:
1201 case XFRM_SHARE_USER:
1202 case XFRM_SHARE_UNIQUE:
1203 break;
1204
1205 default:
1206 return -EINVAL;
1207 }
1208
1209 switch (p->action) {
1210 case XFRM_POLICY_ALLOW:
1211 case XFRM_POLICY_BLOCK:
1212 break;
1213
1214 default:
1215 return -EINVAL;
1216 }
1217
1218 switch (p->sel.family) {
1219 case AF_INET:
1220 break;
1221
1222 case AF_INET6:
1223 #if IS_ENABLED(CONFIG_IPV6)
1224 break;
1225 #else
1226 return -EAFNOSUPPORT;
1227 #endif
1228
1229 default:
1230 return -EINVAL;
1231 }
1232
1233 return verify_policy_dir(p->dir);
1234 }
1235
1236 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1237 {
1238 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1239 struct xfrm_user_sec_ctx *uctx;
1240
1241 if (!rt)
1242 return 0;
1243
1244 uctx = nla_data(rt);
1245 return security_xfrm_policy_alloc(&pol->security, uctx);
1246 }
1247
1248 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1249 int nr)
1250 {
1251 int i;
1252
1253 xp->xfrm_nr = nr;
1254 for (i = 0; i < nr; i++, ut++) {
1255 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1256
1257 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1258 memcpy(&t->saddr, &ut->saddr,
1259 sizeof(xfrm_address_t));
1260 t->reqid = ut->reqid;
1261 t->mode = ut->mode;
1262 t->share = ut->share;
1263 t->optional = ut->optional;
1264 t->aalgos = ut->aalgos;
1265 t->ealgos = ut->ealgos;
1266 t->calgos = ut->calgos;
1267 /* If all masks are ~0, then we allow all algorithms. */
1268 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1269 t->encap_family = ut->family;
1270 }
1271 }
1272
1273 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1274 {
1275 int i;
1276
1277 if (nr > XFRM_MAX_DEPTH)
1278 return -EINVAL;
1279
1280 for (i = 0; i < nr; i++) {
1281 /* We never validated the ut->family value, so many
1282 * applications simply leave it at zero. The check was
1283 * never made and ut->family was ignored because all
1284 * templates could be assumed to have the same family as
1285 * the policy itself. Now that we will have ipv4-in-ipv6
1286 * and ipv6-in-ipv4 tunnels, this is no longer true.
1287 */
1288 if (!ut[i].family)
1289 ut[i].family = family;
1290
1291 switch (ut[i].family) {
1292 case AF_INET:
1293 break;
1294 #if IS_ENABLED(CONFIG_IPV6)
1295 case AF_INET6:
1296 break;
1297 #endif
1298 default:
1299 return -EINVAL;
1300 }
1301 }
1302
1303 return 0;
1304 }
1305
1306 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1307 {
1308 struct nlattr *rt = attrs[XFRMA_TMPL];
1309
1310 if (!rt) {
1311 pol->xfrm_nr = 0;
1312 } else {
1313 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1314 int nr = nla_len(rt) / sizeof(*utmpl);
1315 int err;
1316
1317 err = validate_tmpl(nr, utmpl, pol->family);
1318 if (err)
1319 return err;
1320
1321 copy_templates(pol, utmpl, nr);
1322 }
1323 return 0;
1324 }
1325
1326 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1327 {
1328 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1329 struct xfrm_userpolicy_type *upt;
1330 u8 type = XFRM_POLICY_TYPE_MAIN;
1331 int err;
1332
1333 if (rt) {
1334 upt = nla_data(rt);
1335 type = upt->type;
1336 }
1337
1338 err = verify_policy_type(type);
1339 if (err)
1340 return err;
1341
1342 *tp = type;
1343 return 0;
1344 }
1345
1346 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1347 {
1348 xp->priority = p->priority;
1349 xp->index = p->index;
1350 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1351 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1352 xp->action = p->action;
1353 xp->flags = p->flags;
1354 xp->family = p->sel.family;
1355 /* XXX xp->share = p->share; */
1356 }
1357
1358 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1359 {
1360 memset(p, 0, sizeof(*p));
1361 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1362 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1363 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1364 p->priority = xp->priority;
1365 p->index = xp->index;
1366 p->sel.family = xp->family;
1367 p->dir = dir;
1368 p->action = xp->action;
1369 p->flags = xp->flags;
1370 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1371 }
1372
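/* Build an xfrm_policy from a userpolicy_info plus its optional type,
 * template, security-context and mark attributes. */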
1373 static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1374 {
1375 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1376 int err;
1377
1378 if (!xp) {
1379 *errp = -ENOMEM;
1380 return NULL;
1381 }
1382
1383 copy_from_user_policy(xp, p);
1384
1385 err = copy_from_user_policy_type(&xp->type, attrs);
1386 if (err)
1387 goto error;
1388
1389 if (!(err = copy_from_user_tmpl(xp, attrs)))
1390 err = copy_from_user_sec_ctx(xp, attrs);
1391 if (err)
1392 goto error;
1393
1394 xfrm_mark_get(attrs, &xp->mark);
1395
1396 return xp;
1397 error:
1398 *errp = err;
1399 xp->walk.dead = 1;
1400 xfrm_policy_destroy(xp);
1401 return NULL;
1402 }
1403
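/* Netlink handler for XFRM_MSG_NEWPOLICY/XFRM_MSG_UPDPOLICY: construct the
 * policy, insert it (exclusively for NEWPOLICY), audit and notify. */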
1404 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1405 struct nlattr **attrs)
1406 {
1407 struct net *net = sock_net(skb->sk);
1408 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1409 struct xfrm_policy *xp;
1410 struct km_event c;
1411 int err;
1412 int excl;
1413 kuid_t loginuid = audit_get_loginuid(current);
1414 u32 sessionid = audit_get_sessionid(current);
1415 u32 sid;
1416
1417 err = verify_newpolicy_info(p);
1418 if (err)
1419 return err;
1420 err = verify_sec_ctx_len(attrs);
1421 if (err)
1422 return err;
1423
1424 xp = xfrm_policy_construct(net, p, attrs, &err);
1425 if (!xp)
1426 return err;
1427
1428 /* shouldn't excl be based on nlh flags??
1429 * Aha! this is anti-netlink really, i.e. more pfkey derived;
1430 * in netlink excl is a flag and you wouldn't need
1431 * a type XFRM_MSG_UPDPOLICY - JHS */
1432 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1433 err = xfrm_policy_insert(p->dir, xp, excl);
1434 security_task_getsecid(current, &sid);
1435 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1436
1437 if (err) {
1438 security_xfrm_policy_free(xp->security);
1439 kfree(xp);
1440 return err;
1441 }
1442
1443 c.event = nlh->nlmsg_type;
1444 c.seq = nlh->nlmsg_seq;
1445 c.portid = nlh->nlmsg_pid;
1446 km_policy_notify(xp, p->dir, &c);
1447
1448 xfrm_pol_put(xp);
1449
1450 return 0;
1451 }
1452
1453 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1454 {
1455 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1456 int i;
1457
1458 if (xp->xfrm_nr == 0)
1459 return 0;
1460
1461 for (i = 0; i < xp->xfrm_nr; i++) {
1462 struct xfrm_user_tmpl *up = &vec[i];
1463 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1464
1465 memset(up, 0, sizeof(*up));
1466 memcpy(&up->id, &kp->id, sizeof(up->id));
1467 up->family = kp->encap_family;
1468 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1469 up->reqid = kp->reqid;
1470 up->mode = kp->mode;
1471 up->share = kp->share;
1472 up->optional = kp->optional;
1473 up->aalgos = kp->aalgos;
1474 up->ealgos = kp->ealgos;
1475 up->calgos = kp->calgos;
1476 }
1477
1478 return nla_put(skb, XFRMA_TMPL,
1479 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1480 }
1481
1482 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1483 {
1484 if (x->security) {
1485 return copy_sec_ctx(x->security, skb);
1486 }
1487 return 0;
1488 }
1489
1490 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1491 {
1492 if (xp->security)
1493 return copy_sec_ctx(xp->security, skb);
1494 return 0;
1495 }
1496 static inline size_t userpolicy_type_attrsize(void)
1497 {
1498 #ifdef CONFIG_XFRM_SUB_POLICY
1499 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1500 #else
1501 return 0;
1502 #endif
1503 }
1504
1505 #ifdef CONFIG_XFRM_SUB_POLICY
1506 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1507 {
1508 struct xfrm_userpolicy_type upt = {
1509 .type = type,
1510 };
1511
1512 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1513 }
1514
1515 #else
1516 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1517 {
1518 return 0;
1519 }
1520 #endif
1521
1522 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1523 {
1524 struct xfrm_dump_info *sp = ptr;
1525 struct xfrm_userpolicy_info *p;
1526 struct sk_buff *in_skb = sp->in_skb;
1527 struct sk_buff *skb = sp->out_skb;
1528 struct nlmsghdr *nlh;
1529 int err;
1530
1531 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
1532 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1533 if (nlh == NULL)
1534 return -EMSGSIZE;
1535
1536 p = nlmsg_data(nlh);
1537 copy_to_user_policy(xp, p, dir);
1538 err = copy_to_user_tmpl(xp, skb);
1539 if (!err)
1540 err = copy_to_user_sec_ctx(xp, skb);
1541 if (!err)
1542 err = copy_to_user_policy_type(xp->type, skb);
1543 if (!err)
1544 err = xfrm_mark_put(skb, &xp->mark);
1545 if (err) {
1546 nlmsg_cancel(skb, nlh);
1547 return err;
1548 }
1549 nlmsg_end(skb, nlh);
1550 return 0;
1551 }
1552
1553 static int xfrm_dump_policy_done(struct netlink_callback *cb)
1554 {
1555 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1556
1557 xfrm_policy_walk_done(walk);
1558 return 0;
1559 }
1560
1561 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1562 {
1563 struct net *net = sock_net(skb->sk);
1564 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1565 struct xfrm_dump_info info;
1566
1567 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1568 sizeof(cb->args) - sizeof(cb->args[0]));
1569
1570 info.in_skb = cb->skb;
1571 info.out_skb = skb;
1572 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1573 info.nlmsg_flags = NLM_F_MULTI;
1574
1575 if (!cb->args[0]) {
1576 cb->args[0] = 1;
1577 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1578 }
1579
1580 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1581
1582 return skb->len;
1583 }
1584
1585 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1586 struct xfrm_policy *xp,
1587 int dir, u32 seq)
1588 {
1589 struct xfrm_dump_info info;
1590 struct sk_buff *skb;
1591 int err;
1592
1593 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1594 if (!skb)
1595 return ERR_PTR(-ENOMEM);
1596
1597 info.in_skb = in_skb;
1598 info.out_skb = skb;
1599 info.nlmsg_seq = seq;
1600 info.nlmsg_flags = 0;
1601
1602 err = dump_one_policy(xp, dir, 0, &info);
1603 if (err) {
1604 kfree_skb(skb);
1605 return ERR_PTR(err);
1606 }
1607
1608 return skb;
1609 }
1610
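/* Netlink handler for XFRM_MSG_GETPOLICY/XFRM_MSG_DELPOLICY: look the policy
 * up by index or by selector + security context, then either return it to
 * the caller or delete it, audit and notify. */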
1611 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1612 struct nlattr **attrs)
1613 {
1614 struct net *net = sock_net(skb->sk);
1615 struct xfrm_policy *xp;
1616 struct xfrm_userpolicy_id *p;
1617 u8 type = XFRM_POLICY_TYPE_MAIN;
1618 int err;
1619 struct km_event c;
1620 int delete;
1621 struct xfrm_mark m;
1622 u32 mark = xfrm_mark_get(attrs, &m);
1623
1624 p = nlmsg_data(nlh);
1625 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1626
1627 err = copy_from_user_policy_type(&type, attrs);
1628 if (err)
1629 return err;
1630
1631 err = verify_policy_dir(p->dir);
1632 if (err)
1633 return err;
1634
1635 if (p->index)
1636 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1637 else {
1638 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1639 struct xfrm_sec_ctx *ctx;
1640
1641 err = verify_sec_ctx_len(attrs);
1642 if (err)
1643 return err;
1644
1645 ctx = NULL;
1646 if (rt) {
1647 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1648
1649 err = security_xfrm_policy_alloc(&ctx, uctx);
1650 if (err)
1651 return err;
1652 }
1653 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1654 ctx, delete, &err);
1655 security_xfrm_policy_free(ctx);
1656 }
1657 if (xp == NULL)
1658 return -ENOENT;
1659
1660 if (!delete) {
1661 struct sk_buff *resp_skb;
1662
1663 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1664 if (IS_ERR(resp_skb)) {
1665 err = PTR_ERR(resp_skb);
1666 } else {
1667 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1668 NETLINK_CB(skb).portid);
1669 }
1670 } else {
1671 kuid_t loginuid = audit_get_loginuid(current);
1672 u32 sessionid = audit_get_sessionid(current);
1673 u32 sid;
1674
1675 security_task_getsecid(current, &sid);
1676 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1677 sid);
1678
1679 if (err != 0)
1680 goto out;
1681
1682 c.data.byid = p->index;
1683 c.event = nlh->nlmsg_type;
1684 c.seq = nlh->nlmsg_seq;
1685 c.portid = nlh->nlmsg_pid;
1686 km_policy_notify(xp, p->dir, &c);
1687 }
1688
1689 out:
1690 xfrm_pol_put(xp);
1691 if (delete && err == 0)
1692 xfrm_garbage_collect(net);
1693 return err;
1694 }
1695
1696 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1697 struct nlattr **attrs)
1698 {
1699 struct net *net = sock_net(skb->sk);
1700 struct km_event c;
1701 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1702 struct xfrm_audit audit_info;
1703 int err;
1704
1705 audit_info.loginuid = audit_get_loginuid(current);
1706 audit_info.sessionid = audit_get_sessionid(current);
1707 security_task_getsecid(current, &audit_info.secid);
1708 err = xfrm_state_flush(net, p->proto, &audit_info);
1709 if (err) {
1710 if (err == -ESRCH) /* empty table */
1711 return 0;
1712 return err;
1713 }
1714 c.data.proto = p->proto;
1715 c.event = nlh->nlmsg_type;
1716 c.seq = nlh->nlmsg_seq;
1717 c.portid = nlh->nlmsg_pid;
1718 c.net = net;
1719 km_state_notify(NULL, &c);
1720
1721 return 0;
1722 }
1723
1724 static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
1725 {
1726 size_t replay_size = x->replay_esn ?
1727 xfrm_replay_state_esn_len(x->replay_esn) :
1728 sizeof(struct xfrm_replay_state);
1729
1730 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1731 + nla_total_size(replay_size)
1732 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1733 + nla_total_size(sizeof(struct xfrm_mark))
1734 + nla_total_size(4) /* XFRM_AE_RTHR */
1735 + nla_total_size(4); /* XFRM_AE_ETHR */
1736 }
1737
1738 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
1739 {
1740 struct xfrm_aevent_id *id;
1741 struct nlmsghdr *nlh;
1742 int err;
1743
1744 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1745 if (nlh == NULL)
1746 return -EMSGSIZE;
1747
1748 id = nlmsg_data(nlh);
1749 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1750 id->sa_id.spi = x->id.spi;
1751 id->sa_id.family = x->props.family;
1752 id->sa_id.proto = x->id.proto;
1753 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1754 id->reqid = x->props.reqid;
1755 id->flags = c->data.aevent;
1756
1757 if (x->replay_esn) {
1758 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1759 xfrm_replay_state_esn_len(x->replay_esn),
1760 x->replay_esn);
1761 } else {
1762 err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1763 &x->replay);
1764 }
1765 if (err)
1766 goto out_cancel;
1767 err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1768 if (err)
1769 goto out_cancel;
1770
1771 if (id->flags & XFRM_AE_RTHR) {
1772 err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1773 if (err)
1774 goto out_cancel;
1775 }
1776 if (id->flags & XFRM_AE_ETHR) {
1777 err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1778 x->replay_maxage * 10 / HZ);
1779 if (err)
1780 goto out_cancel;
1781 }
1782 err = xfrm_mark_put(skb, &x->mark);
1783 if (err)
1784 goto out_cancel;
1785
1786 return nlmsg_end(skb, nlh);
1787
1788 out_cancel:
1789 nlmsg_cancel(skb, nlh);
1790 return err;
1791 }
1792
1793 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1794 struct nlattr **attrs)
1795 {
1796 struct net *net = sock_net(skb->sk);
1797 struct xfrm_state *x;
1798 struct sk_buff *r_skb;
1799 int err;
1800 struct km_event c;
1801 u32 mark;
1802 struct xfrm_mark m;
1803 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1804 struct xfrm_usersa_id *id = &p->sa_id;
1805
1806 mark = xfrm_mark_get(attrs, &m);
1807
1808 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1809 if (x == NULL)
1810 return -ESRCH;
1811
1812 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
1813 if (r_skb == NULL) {
1814 xfrm_state_put(x);
1815 return -ENOMEM;
1816 }
1817
1818 /*
1819 * XXX: is this lock really needed - none of the other
1820 * gets lock (the concern is things getting updated
1821 * while we are still reading) - jhs
1822 */
1823 spin_lock_bh(&x->lock);
1824 c.data.aevent = p->flags;
1825 c.seq = nlh->nlmsg_seq;
1826 c.portid = nlh->nlmsg_pid;
1827
1828 if (build_aevent(r_skb, x, &c) < 0)
1829 BUG();
1830 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
1831 spin_unlock_bh(&x->lock);
1832 xfrm_state_put(x);
1833 return err;
1834 }
1835
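/* Netlink handler for XFRM_MSG_NEWAE: requires NLM_F_REPLACE and at least one
 * replay/lifetime attribute; updates the SA's AE parameters under x->lock and
 * sends an XFRM_AE_CU notification. */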
1836 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1837 struct nlattr **attrs)
1838 {
1839 struct net *net = sock_net(skb->sk);
1840 struct xfrm_state *x;
1841 struct km_event c;
1842 int err = -EINVAL;
1843 u32 mark = 0;
1844 struct xfrm_mark m;
1845 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1846 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1847 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
1848 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1849
1850 if (!lt && !rp && !re)
1851 return err;
1852
1853 /* pedantic mode - thou shalt sayeth replaceth */
1854 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1855 return err;
1856
1857 mark = xfrm_mark_get(attrs, &m);
1858
1859 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1860 if (x == NULL)
1861 return -ESRCH;
1862
1863 if (x->km.state != XFRM_STATE_VALID)
1864 goto out;
1865
1866 err = xfrm_replay_verify_len(x->replay_esn, rp);
1867 if (err)
1868 goto out;
1869
1870 spin_lock_bh(&x->lock);
1871 xfrm_update_ae_params(x, attrs, 1);
1872 spin_unlock_bh(&x->lock);
1873
1874 c.event = nlh->nlmsg_type;
1875 c.seq = nlh->nlmsg_seq;
1876 c.portid = nlh->nlmsg_pid;
1877 c.data.aevent = XFRM_AE_CU;
1878 km_state_notify(x, &c);
1879 err = 0;
1880 out:
1881 xfrm_state_put(x);
1882 return err;
1883 }
1884
1885 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1886 struct nlattr **attrs)
1887 {
1888 struct net *net = sock_net(skb->sk);
1889 struct km_event c;
1890 u8 type = XFRM_POLICY_TYPE_MAIN;
1891 int err;
1892 struct xfrm_audit audit_info;
1893
1894 err = copy_from_user_policy_type(&type, attrs);
1895 if (err)
1896 return err;
1897
1898 audit_info.loginuid = audit_get_loginuid(current);
1899 audit_info.sessionid = audit_get_sessionid(current);
1900 security_task_getsecid(current, &audit_info.secid);
1901 err = xfrm_policy_flush(net, type, &audit_info);
1902 if (err) {
1903 if (err == -ESRCH) /* empty table */
1904 return 0;
1905 return err;
1906 }
1907
1908 c.data.type = type;
1909 c.event = nlh->nlmsg_type;
1910 c.seq = nlh->nlmsg_seq;
1911 c.portid = nlh->nlmsg_pid;
1912 c.net = net;
1913 km_policy_notify(NULL, 0, &c);
1914 return 0;
1915 }
1916
1917 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1918 struct nlattr **attrs)
1919 {
1920 struct net *net = sock_net(skb->sk);
1921 struct xfrm_policy *xp;
1922 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1923 struct xfrm_userpolicy_info *p = &up->pol;
1924 u8 type = XFRM_POLICY_TYPE_MAIN;
1925 int err = -ENOENT;
1926 struct xfrm_mark m;
1927 u32 mark = xfrm_mark_get(attrs, &m);
1928
1929 err = copy_from_user_policy_type(&type, attrs);
1930 if (err)
1931 return err;
1932
1933 err = verify_policy_dir(p->dir);
1934 if (err)
1935 return err;
1936
1937 if (p->index)
1938 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1939 else {
1940 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1941 struct xfrm_sec_ctx *ctx;
1942
1943 err = verify_sec_ctx_len(attrs);
1944 if (err)
1945 return err;
1946
1947 ctx = NULL;
1948 if (rt) {
1949 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1950
1951 err = security_xfrm_policy_alloc(&ctx, uctx);
1952 if (err)
1953 return err;
1954 }
1955 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1956 &p->sel, ctx, 0, &err);
1957 security_xfrm_policy_free(ctx);
1958 }
1959 if (xp == NULL)
1960 return -ENOENT;
1961
1962 if (unlikely(xp->walk.dead))
1963 goto out;
1964
1965 err = 0;
1966 if (up->hard) {
1967 kuid_t loginuid = audit_get_loginuid(current);
1968 u32 sessionid = audit_get_sessionid(current);
1969 u32 sid;
1970
1971 security_task_getsecid(current, &sid);
1972 xfrm_policy_delete(xp, p->dir);
1973 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1974
1975 } else {
1976 // reset the timers here?
1977 WARN(1, "Dont know what to do with soft policy expire\n");
1978 }
1979 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
1980
1981 out:
1982 xfrm_pol_put(xp);
1983 return err;
1984 }
1985
1986 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1987 struct nlattr **attrs)
1988 {
1989 struct net *net = sock_net(skb->sk);
1990 struct xfrm_state *x;
1991 int err;
1992 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1993 struct xfrm_usersa_info *p = &ue->state;
1994 struct xfrm_mark m;
1995 u32 mark = xfrm_mark_get(attrs, &m);
1996
1997 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1998
1999 err = -ENOENT;
2000 if (x == NULL)
2001 return err;
2002
2003 spin_lock_bh(&x->lock);
2004 err = -EINVAL;
2005 if (x->km.state != XFRM_STATE_VALID)
2006 goto out;
2007 km_state_expired(x, ue->hard, nlh->nlmsg_pid);
2008
2009 if (ue->hard) {
2010 kuid_t loginuid = audit_get_loginuid(current);
2011 u32 sessionid = audit_get_sessionid(current);
2012 u32 sid;
2013
2014 security_task_getsecid(current, &sid);
2015 __xfrm_state_delete(x);
2016 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
2017 }
2018 err = 0;
2019 out:
2020 spin_unlock_bh(&x->lock);
2021 xfrm_state_put(x);
2022 return err;
2023 }
2024
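/* Netlink handler for XFRM_MSG_ACQUIRE: build a throw-away state and policy
 * from the acquire message and issue km_query() for every template. */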
2025 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2026 struct nlattr **attrs)
2027 {
2028 struct net *net = sock_net(skb->sk);
2029 struct xfrm_policy *xp;
2030 struct xfrm_user_tmpl *ut;
2031 int i;
2032 struct nlattr *rt = attrs[XFRMA_TMPL];
2033 struct xfrm_mark mark;
2034
2035 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
2036 struct xfrm_state *x = xfrm_state_alloc(net);
2037 int err = -ENOMEM;
2038
2039 if (!x)
2040 goto nomem;
2041
2042 xfrm_mark_get(attrs, &mark);
2043
2044 err = verify_newpolicy_info(&ua->policy);
2045 if (err)
2046 goto bad_policy;
2047
2048 /* build an XP */
2049 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
2050 if (!xp)
2051 goto free_state;
2052
2053 memcpy(&x->id, &ua->id, sizeof(ua->id));
2054 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
2055 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
2056 xp->mark.m = x->mark.m = mark.m;
2057 xp->mark.v = x->mark.v = mark.v;
2058 ut = nla_data(rt);
2059 /* extract the templates and for each call km_key */
2060 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
2061 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
2062 memcpy(&x->id, &t->id, sizeof(x->id));
2063 x->props.mode = t->mode;
2064 x->props.reqid = t->reqid;
2065 x->props.family = ut->family;
2066 t->aalgos = ua->aalgos;
2067 t->ealgos = ua->ealgos;
2068 t->calgos = ua->calgos;
2069 err = km_query(x, t, xp);
2070
2071 }
2072
2073 kfree(x);
2074 kfree(xp);
2075
2076 return 0;
2077
2078 bad_policy:
2079 WARN(1, "BAD policy passed\n");
2080 free_state:
2081 kfree(x);
2082 nomem:
2083 return err;
2084 }
2085
2086 #ifdef CONFIG_XFRM_MIGRATE
2087 static int copy_from_user_migrate(struct xfrm_migrate *ma,
2088 struct xfrm_kmaddress *k,
2089 struct nlattr **attrs, int *num)
2090 {
2091 struct nlattr *rt = attrs[XFRMA_MIGRATE];
2092 struct xfrm_user_migrate *um;
2093 int i, num_migrate;
2094
2095 if (k != NULL) {
2096 struct xfrm_user_kmaddress *uk;
2097
2098 uk = nla_data(attrs[XFRMA_KMADDRESS]);
2099 memcpy(&k->local, &uk->local, sizeof(k->local));
2100 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
2101 k->family = uk->family;
2102 k->reserved = uk->reserved;
2103 }
2104
2105 um = nla_data(rt);
2106 num_migrate = nla_len(rt) / sizeof(*um);
2107
2108 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
2109 return -EINVAL;
2110
2111 for (i = 0; i < num_migrate; i++, um++, ma++) {
2112 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
2113 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
2114 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
2115 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
2116
2117 ma->proto = um->proto;
2118 ma->mode = um->mode;
2119 ma->reqid = um->reqid;
2120
2121 ma->old_family = um->old_family;
2122 ma->new_family = um->new_family;
2123 }
2124
2125 *num = i;
2126 return 0;
2127 }
2128
2129 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2130 struct nlattr **attrs)
2131 {
2132 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
2133 struct xfrm_migrate m[XFRM_MAX_DEPTH];
2134 struct xfrm_kmaddress km, *kmp;
2135 u8 type;
2136 int err;
2137 int n = 0;
2138
2139 if (attrs[XFRMA_MIGRATE] == NULL)
2140 return -EINVAL;
2141
2142 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
2143
2144 err = copy_from_user_policy_type(&type, attrs);
2145 if (err)
2146 return err;
2147
2148 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
2149 if (err)
2150 return err;
2151
2152 if (!n)
2153 return 0;
2154
2155 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);
2156
2157 return 0;
2158 }
2159 #else
2160 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2161 struct nlattr **attrs)
2162 {
2163 return -ENOPROTOOPT;
2164 }
2165 #endif
2166
2167 #ifdef CONFIG_XFRM_MIGRATE
2168 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
2169 {
2170 struct xfrm_user_migrate um;
2171
2172 memset(&um, 0, sizeof(um));
2173 um.proto = m->proto;
2174 um.mode = m->mode;
2175 um.reqid = m->reqid;
2176 um.old_family = m->old_family;
2177 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
2178 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
2179 um.new_family = m->new_family;
2180 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
2181 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
2182
2183 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
2184 }
2185
2186 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
2187 {
2188 struct xfrm_user_kmaddress uk;
2189
2190 memset(&uk, 0, sizeof(uk));
2191 uk.family = k->family;
2192 uk.reserved = k->reserved;
2193 memcpy(&uk.local, &k->local, sizeof(uk.local));
2194 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
2195
2196 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
2197 }
2198
2199 static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
2200 {
2201 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2202 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2203 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2204 + userpolicy_type_attrsize();
2205 }
2206
2207 static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2208 int num_migrate, const struct xfrm_kmaddress *k,
2209 const struct xfrm_selector *sel, u8 dir, u8 type)
2210 {
2211 const struct xfrm_migrate *mp;
2212 struct xfrm_userpolicy_id *pol_id;
2213 struct nlmsghdr *nlh;
2214 int i, err;
2215
2216 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2217 if (nlh == NULL)
2218 return -EMSGSIZE;
2219
2220 pol_id = nlmsg_data(nlh);
2221 /* copy data from selector, dir, and type to the pol_id */
2222 memset(pol_id, 0, sizeof(*pol_id));
2223 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2224 pol_id->dir = dir;
2225
2226 if (k != NULL) {
2227 err = copy_to_user_kmaddress(k, skb);
2228 if (err)
2229 goto out_cancel;
2230 }
2231 err = copy_to_user_policy_type(type, skb);
2232 if (err)
2233 goto out_cancel;
2234 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2235 err = copy_to_user_migrate(mp, skb);
2236 if (err)
2237 goto out_cancel;
2238 }
2239
2240 return nlmsg_end(skb, nlh);
2241
2242 out_cancel:
2243 nlmsg_cancel(skb, nlh);
2244 return err;
2245 }
2246
2247 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2248 const struct xfrm_migrate *m, int num_migrate,
2249 const struct xfrm_kmaddress *k)
2250 {
2251 struct net *net = &init_net;
2252 struct sk_buff *skb;
2253
2254 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
2255 if (skb == NULL)
2256 return -ENOMEM;
2257
2258 /* build the migrate notification; failure means xfrm_migrate_msgsize() undersized the skb */
2259 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
2260 BUG();
2261
2262 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
2263 }
2264 #else
2265 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2266 const struct xfrm_migrate *m, int num_migrate,
2267 const struct xfrm_kmaddress *k)
2268 {
2269 return -ENOPROTOOPT;
2270 }
2271 #endif
2272
2273 #define XMSGSIZE(type) sizeof(struct type)
2274
2275 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2276 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2277 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2278 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2279 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2280 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2281 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2282 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
2283 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
2284 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
2285 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2286 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2287 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
2288 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
2289 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
2290 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2291 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2292 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
2293 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2294 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
2295 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2296 };
2297
2298 #undef XMSGSIZE
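/*
 * Minimum (fixed header) payload length for each message type, indexed by
 * type - XFRM_MSG_BASE.  xfrm_user_rcv_msg() hands the per-type value to
 * nlmsg_parse() as the header length to skip before attribute parsing,
 * e.g. xfrm_msg_min[XFRM_MSG_NEWSA - XFRM_MSG_BASE] is
 * sizeof(struct xfrm_usersa_info).
 */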
2299
2300 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2301 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2302 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2303 [XFRMA_LASTUSED] = { .type = NLA_U64},
2304 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2305 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2306 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2307 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
2308 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
2309 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
2310 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
2311 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
2312 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
2313 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
2314 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
2315 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
2316 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
2317 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
2318 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2319 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2320 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2321 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2322 [XFRMA_TFCPAD] = { .type = NLA_U32 },
2323 [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
2324 [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
2325 };
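/*
 * Attribute validation policy.  For NLA_UNSPEC entries the .len field is a
 * minimum length, so variable-sized attributes such as the algorithm blobs
 * (key material appended to struct xfrm_algo and friends) and the ESN
 * replay state pass here and get their exact length checked by the
 * attribute-specific verify helpers.
 */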
2326
2327 static const struct xfrm_link {
2328 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
2329 int (*dump)(struct sk_buff *, struct netlink_callback *);
2330 int (*done)(struct netlink_callback *);
2331 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
2332 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2333 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
2334 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
2335 .dump = xfrm_dump_sa,
2336 .done = xfrm_dump_sa_done },
2337 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2338 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
2339 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
2340 .dump = xfrm_dump_policy,
2341 .done = xfrm_dump_policy_done },
2342 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
2343 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
2344 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
2345 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2346 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2347 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
2348 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
2349 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
2350 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
2351 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
2352 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
2353 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
2354 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
2355 };
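/*
 * Dispatch table for user requests, again indexed by type - XFRM_MSG_BASE.
 * Types without a .doit (or, for dump requests, without a .dump) are
 * rejected with -EINVAL by xfrm_user_rcv_msg().
 */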
2356
2357 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2358 {
2359 struct net *net = sock_net(skb->sk);
2360 struct nlattr *attrs[XFRMA_MAX+1];
2361 const struct xfrm_link *link;
2362 int type, err;
2363
2364 type = nlh->nlmsg_type;
2365 if (type > XFRM_MSG_MAX)
2366 return -EINVAL;
2367
2368 type -= XFRM_MSG_BASE;
2369 link = &xfrm_dispatch[type];
2370
2371 /* All operations require privileges, even GET */
2372 if (!netlink_net_capable(skb, CAP_NET_ADMIN))
2373 return -EPERM;
2374
2375 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2376 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2377 (nlh->nlmsg_flags & NLM_F_DUMP)) {
2378 if (link->dump == NULL)
2379 return -EINVAL;
2380
2381 {
2382 struct netlink_dump_control c = {
2383 .dump = link->dump,
2384 .done = link->done,
2385 };
2386 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
2387 }
2388 }
2389
2390 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
2391 xfrma_policy);
2392 if (err < 0)
2393 return err;
2394
2395 if (link->doit == NULL)
2396 return -EINVAL;
2397
2398 return link->doit(skb, nlh, attrs);
2399 }
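/*
 * For reference, a minimal user-space sketch of reaching this entry point
 * (illustrative only, not part of this file; uses the uapi linux/netlink.h
 * and linux/xfrm.h definitions, error handling omitted, names arbitrary):
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct xfrm_userpolicy_id id;
 *	} req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.id)),
 *		.n.nlmsg_type  = XFRM_MSG_GETPOLICY,
 *		.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *
 *	send(fd, &req, req.n.nlmsg_len, 0);
 *
 * A dump request like this one takes the netlink_dump_start() path above;
 * any other request is parsed with nlmsg_parse() against xfrma_policy and
 * handed to the matching .doit handler, all under CAP_NET_ADMIN.
 */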
2400
2401 static void xfrm_netlink_rcv(struct sk_buff *skb)
2402 {
2403 mutex_lock(&xfrm_cfg_mutex);
2404 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
2405 mutex_unlock(&xfrm_cfg_mutex);
2406 }
2407
2408 static inline size_t xfrm_expire_msgsize(void)
2409 {
2410 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2411 + nla_total_size(sizeof(struct xfrm_mark));
2412 }
2413
2414 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
2415 {
2416 struct xfrm_user_expire *ue;
2417 struct nlmsghdr *nlh;
2418 int err;
2419
2420 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2421 if (nlh == NULL)
2422 return -EMSGSIZE;
2423
2424 ue = nlmsg_data(nlh);
2425 copy_to_user_state(x, &ue->state);
2426 ue->hard = (c->data.hard != 0) ? 1 : 0;
2427
2428 err = xfrm_mark_put(skb, &x->mark);
2429 if (err)
2430 return err;
2431
2432 return nlmsg_end(skb, nlh);
2433 }
2434
2435 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
2436 {
2437 struct net *net = xs_net(x);
2438 struct sk_buff *skb;
2439
2440 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2441 if (skb == NULL)
2442 return -ENOMEM;
2443
2444 if (build_expire(skb, x, c) < 0) {
2445 kfree_skb(skb);
2446 return -EMSGSIZE;
2447 }
2448
2449 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2450 }
2451
2452 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
2453 {
2454 struct net *net = xs_net(x);
2455 struct sk_buff *skb;
2456
2457 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
2458 if (skb == NULL)
2459 return -ENOMEM;
2460
2461 if (build_aevent(skb, x, c) < 0)
2462 BUG();
2463
2464 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2465 }
2466
2467 static int xfrm_notify_sa_flush(const struct km_event *c)
2468 {
2469 struct net *net = c->net;
2470 struct xfrm_usersa_flush *p;
2471 struct nlmsghdr *nlh;
2472 struct sk_buff *skb;
2473 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2474
2475 skb = nlmsg_new(len, GFP_ATOMIC);
2476 if (skb == NULL)
2477 return -ENOMEM;
2478
2479 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2480 if (nlh == NULL) {
2481 kfree_skb(skb);
2482 return -EMSGSIZE;
2483 }
2484
2485 p = nlmsg_data(nlh);
2486 p->proto = c->data.proto;
2487
2488 nlmsg_end(skb, nlh);
2489
2490 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2491 }
2492
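/*
 * Size estimate for the optional attributes that copy_to_user_state_extra()
 * may emit for this state; used to size the notification skb in
 * xfrm_notify_sa() below.
 */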
2493 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2494 {
2495 size_t l = 0;
2496 if (x->aead)
2497 l += nla_total_size(aead_len(x->aead));
2498 if (x->aalg) {
2499 l += nla_total_size(sizeof(struct xfrm_algo) +
2500 (x->aalg->alg_key_len + 7) / 8);
2501 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
2502 }
2503 if (x->ealg)
2504 l += nla_total_size(xfrm_alg_len(x->ealg));
2505 if (x->calg)
2506 l += nla_total_size(sizeof(*x->calg));
2507 if (x->encap)
2508 l += nla_total_size(sizeof(*x->encap));
2509 if (x->tfcpad)
2510 l += nla_total_size(sizeof(x->tfcpad));
2511 if (x->replay_esn)
2512 l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
2513 if (x->security)
2514 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2515 x->security->ctx_len);
2516 if (x->coaddr)
2517 l += nla_total_size(sizeof(*x->coaddr));
2518 if (x->props.extra_flags)
2519 l += nla_total_size(sizeof(x->props.extra_flags));
2520
2521 /* Must count x->lastused as it may become non-zero behind our back. */
2522 l += nla_total_size(sizeof(u64));
2523
2524 return l;
2525 }
2526
2527 static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2528 {
2529 struct net *net = xs_net(x);
2530 struct xfrm_usersa_info *p;
2531 struct xfrm_usersa_id *id;
2532 struct nlmsghdr *nlh;
2533 struct sk_buff *skb;
2534 int len = xfrm_sa_len(x);
2535 int headlen, err;
2536
2537 headlen = sizeof(*p);
2538 if (c->event == XFRM_MSG_DELSA) {
2539 len += nla_total_size(headlen);
2540 headlen = sizeof(*id);
2541 len += nla_total_size(sizeof(struct xfrm_mark));
2542 }
2543 len += NLMSG_ALIGN(headlen);
2544
2545 skb = nlmsg_new(len, GFP_ATOMIC);
2546 if (skb == NULL)
2547 return -ENOMEM;
2548
2549 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
2550 err = -EMSGSIZE;
2551 if (nlh == NULL)
2552 goto out_free_skb;
2553
2554 p = nlmsg_data(nlh);
2555 if (c->event == XFRM_MSG_DELSA) {
2556 struct nlattr *attr;
2557
2558 id = nlmsg_data(nlh);
2559 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2560 id->spi = x->id.spi;
2561 id->family = x->props.family;
2562 id->proto = x->id.proto;
2563
2564 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2565 err = -EMSGSIZE;
2566 if (attr == NULL)
2567 goto out_free_skb;
2568
2569 p = nla_data(attr);
2570 }
2571 err = copy_to_user_state_extra(x, p, skb);
2572 if (err)
2573 goto out_free_skb;
2574
2575 nlmsg_end(skb, nlh);
2576
2577 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2578
2579 out_free_skb:
2580 kfree_skb(skb);
2581 return err;
2582 }
2583
2584 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
2585 {
2586
2587 switch (c->event) {
2588 case XFRM_MSG_EXPIRE:
2589 return xfrm_exp_state_notify(x, c);
2590 case XFRM_MSG_NEWAE:
2591 return xfrm_aevent_state_notify(x, c);
2592 case XFRM_MSG_DELSA:
2593 case XFRM_MSG_UPDSA:
2594 case XFRM_MSG_NEWSA:
2595 return xfrm_notify_sa(x, c);
2596 case XFRM_MSG_FLUSHSA:
2597 return xfrm_notify_sa_flush(c);
2598 default:
2599 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
2600 c->event);
2601 break;
2602 }
2603
2604 return 0;
2605
2606 }
2607
2608 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2609 struct xfrm_policy *xp)
2610 {
2611 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2612 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2613 + nla_total_size(sizeof(struct xfrm_mark))
2614 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2615 + userpolicy_type_attrsize();
2616 }
2617
2618 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2619 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
2620 {
2621 __u32 seq = xfrm_get_acqseq();
2622 struct xfrm_user_acquire *ua;
2623 struct nlmsghdr *nlh;
2624 int err;
2625
2626 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2627 if (nlh == NULL)
2628 return -EMSGSIZE;
2629
2630 ua = nlmsg_data(nlh);
2631 memcpy(&ua->id, &x->id, sizeof(ua->id));
2632 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2633 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2634 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
2635 ua->aalgos = xt->aalgos;
2636 ua->ealgos = xt->ealgos;
2637 ua->calgos = xt->calgos;
2638 ua->seq = x->km.seq = seq;
2639
2640 err = copy_to_user_tmpl(xp, skb);
2641 if (!err)
2642 err = copy_to_user_state_sec_ctx(x, skb);
2643 if (!err)
2644 err = copy_to_user_policy_type(xp->type, skb);
2645 if (!err)
2646 err = xfrm_mark_put(skb, &xp->mark);
2647 if (err) {
2648 nlmsg_cancel(skb, nlh);
2649 return err;
2650 }
2651
2652 return nlmsg_end(skb, nlh);
2653 }
2654
2655 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2656 struct xfrm_policy *xp)
2657 {
2658 struct net *net = xs_net(x);
2659 struct sk_buff *skb;
2660
2661 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2662 if (skb == NULL)
2663 return -ENOMEM;
2664
2665 if (build_acquire(skb, x, xt, xp) < 0)
2666 BUG();
2667
2668 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2669 }
2670
2671 /* User gives us xfrm_user_policy_info followed by an array of 0
2672 * or more templates.
2673 */
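/*
 * Illustrative layout of the option payload handed to us (lengths as
 * defined by the uapi structures; nr is recovered from the total length):
 *
 *	struct xfrm_userpolicy_info p;		policy description
 *	struct xfrm_user_tmpl       ut[nr];	nr = (len - sizeof(p)) / sizeof(*ut)
 */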
2674 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2675 u8 *data, int len, int *dir)
2676 {
2677 struct net *net = sock_net(sk);
2678 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2679 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2680 struct xfrm_policy *xp;
2681 int nr;
2682
2683 switch (sk->sk_family) {
2684 case AF_INET:
2685 if (opt != IP_XFRM_POLICY) {
2686 *dir = -EOPNOTSUPP;
2687 return NULL;
2688 }
2689 break;
2690 #if IS_ENABLED(CONFIG_IPV6)
2691 case AF_INET6:
2692 if (opt != IPV6_XFRM_POLICY) {
2693 *dir = -EOPNOTSUPP;
2694 return NULL;
2695 }
2696 break;
2697 #endif
2698 default:
2699 *dir = -EINVAL;
2700 return NULL;
2701 }
2702
2703 *dir = -EINVAL;
2704
2705 if (len < sizeof(*p) ||
2706 verify_newpolicy_info(p))
2707 return NULL;
2708
2709 nr = ((len - sizeof(*p)) / sizeof(*ut));
2710 if (validate_tmpl(nr, ut, p->sel.family))
2711 return NULL;
2712
2713 if (p->dir > XFRM_POLICY_OUT)
2714 return NULL;
2715
2716 xp = xfrm_policy_alloc(net, GFP_ATOMIC);
2717 if (xp == NULL) {
2718 *dir = -ENOBUFS;
2719 return NULL;
2720 }
2721
2722 copy_from_user_policy(xp, p);
2723 xp->type = XFRM_POLICY_TYPE_MAIN;
2724 copy_templates(xp, ut, nr);
2725
2726 *dir = p->dir;
2727
2728 return xp;
2729 }
2730
2731 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2732 {
2733 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2734 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2735 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2736 + nla_total_size(sizeof(struct xfrm_mark))
2737 + userpolicy_type_attrsize();
2738 }
2739
2740 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2741 int dir, const struct km_event *c)
2742 {
2743 struct xfrm_user_polexpire *upe;
2744 int hard = c->data.hard;
2745 struct nlmsghdr *nlh;
2746 int err;
2747
2748 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2749 if (nlh == NULL)
2750 return -EMSGSIZE;
2751
2752 upe = nlmsg_data(nlh);
2753 copy_to_user_policy(xp, &upe->pol, dir);
2754 err = copy_to_user_tmpl(xp, skb);
2755 if (!err)
2756 err = copy_to_user_sec_ctx(xp, skb);
2757 if (!err)
2758 err = copy_to_user_policy_type(xp->type, skb);
2759 if (!err)
2760 err = xfrm_mark_put(skb, &xp->mark);
2761 if (err) {
2762 nlmsg_cancel(skb, nlh);
2763 return err;
2764 }
2765 upe->hard = !!hard;
2766
2767 return nlmsg_end(skb, nlh);
2768 }
2769
2770 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2771 {
2772 struct net *net = xp_net(xp);
2773 struct sk_buff *skb;
2774
2775 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2776 if (skb == NULL)
2777 return -ENOMEM;
2778
2779 if (build_polexpire(skb, xp, dir, c) < 0)
2780 BUG();
2781
2782 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2783 }
2784
2785 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
2786 {
2787 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2788 struct net *net = xp_net(xp);
2789 struct xfrm_userpolicy_info *p;
2790 struct xfrm_userpolicy_id *id;
2791 struct nlmsghdr *nlh;
2792 struct sk_buff *skb;
2793 int headlen, err;
2794
2795 headlen = sizeof(*p);
2796 if (c->event == XFRM_MSG_DELPOLICY) {
2797 len += nla_total_size(headlen);
2798 headlen = sizeof(*id);
2799 }
2800 len += userpolicy_type_attrsize();
2801 len += nla_total_size(sizeof(struct xfrm_mark));
2802 len += NLMSG_ALIGN(headlen);
2803
2804 skb = nlmsg_new(len, GFP_ATOMIC);
2805 if (skb == NULL)
2806 return -ENOMEM;
2807
2808 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
2809 err = -EMSGSIZE;
2810 if (nlh == NULL)
2811 goto out_free_skb;
2812
2813 p = nlmsg_data(nlh);
2814 if (c->event == XFRM_MSG_DELPOLICY) {
2815 struct nlattr *attr;
2816
2817 id = nlmsg_data(nlh);
2818 memset(id, 0, sizeof(*id));
2819 id->dir = dir;
2820 if (c->data.byid)
2821 id->index = xp->index;
2822 else
2823 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2824
2825 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2826 err = -EMSGSIZE;
2827 if (attr == NULL)
2828 goto out_free_skb;
2829
2830 p = nla_data(attr);
2831 }
2832
2833 copy_to_user_policy(xp, p, dir);
2834 err = copy_to_user_tmpl(xp, skb);
2835 if (!err)
2836 err = copy_to_user_policy_type(xp->type, skb);
2837 if (!err)
2838 err = xfrm_mark_put(skb, &xp->mark);
2839 if (err)
2840 goto out_free_skb;
2841
2842 nlmsg_end(skb, nlh);
2843
2844 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2845
2846 out_free_skb:
2847 kfree_skb(skb);
2848 return err;
2849 }
2850
2851 static int xfrm_notify_policy_flush(const struct km_event *c)
2852 {
2853 struct net *net = c->net;
2854 struct nlmsghdr *nlh;
2855 struct sk_buff *skb;
2856 int err;
2857
2858 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2859 if (skb == NULL)
2860 return -ENOMEM;
2861
2862 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2863 err = -EMSGSIZE;
2864 if (nlh == NULL)
2865 goto out_free_skb;
2866 err = copy_to_user_policy_type(c->data.type, skb);
2867 if (err)
2868 goto out_free_skb;
2869
2870 nlmsg_end(skb, nlh);
2871
2872 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2873
2874 out_free_skb:
2875 kfree_skb(skb);
2876 return err;
2877 }
2878
2879 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2880 {
2881
2882 switch (c->event) {
2883 case XFRM_MSG_NEWPOLICY:
2884 case XFRM_MSG_UPDPOLICY:
2885 case XFRM_MSG_DELPOLICY:
2886 return xfrm_notify_policy(xp, dir, c);
2887 case XFRM_MSG_FLUSHPOLICY:
2888 return xfrm_notify_policy_flush(c);
2889 case XFRM_MSG_POLEXPIRE:
2890 return xfrm_exp_policy_notify(xp, dir, c);
2891 default:
2892 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
2893 c->event);
2894 }
2895
2896 return 0;
2897
2898 }
2899
2900 static inline size_t xfrm_report_msgsize(void)
2901 {
2902 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2903 }
2904
2905 static int build_report(struct sk_buff *skb, u8 proto,
2906 struct xfrm_selector *sel, xfrm_address_t *addr)
2907 {
2908 struct xfrm_user_report *ur;
2909 struct nlmsghdr *nlh;
2910
2911 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2912 if (nlh == NULL)
2913 return -EMSGSIZE;
2914
2915 ur = nlmsg_data(nlh);
2916 ur->proto = proto;
2917 memcpy(&ur->sel, sel, sizeof(ur->sel));
2918
2919 if (addr) {
2920 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
2921 if (err) {
2922 nlmsg_cancel(skb, nlh);
2923 return err;
2924 }
2925 }
2926 return nlmsg_end(skb, nlh);
2927 }
2928
2929 static int xfrm_send_report(struct net *net, u8 proto,
2930 struct xfrm_selector *sel, xfrm_address_t *addr)
2931 {
2932 struct sk_buff *skb;
2933
2934 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2935 if (skb == NULL)
2936 return -ENOMEM;
2937
2938 if (build_report(skb, proto, sel, addr) < 0)
2939 BUG();
2940
2941 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2942 }
2943
2944 static inline size_t xfrm_mapping_msgsize(void)
2945 {
2946 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
2947 }
2948
2949 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
2950 xfrm_address_t *new_saddr, __be16 new_sport)
2951 {
2952 struct xfrm_user_mapping *um;
2953 struct nlmsghdr *nlh;
2954
2955 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
2956 if (nlh == NULL)
2957 return -EMSGSIZE;
2958
2959 um = nlmsg_data(nlh);
2960
2961 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
2962 um->id.spi = x->id.spi;
2963 um->id.family = x->props.family;
2964 um->id.proto = x->id.proto;
2965 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
2966 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
2967 um->new_sport = new_sport;
2968 um->old_sport = x->encap->encap_sport;
2969 um->reqid = x->props.reqid;
2970
2971 return nlmsg_end(skb, nlh);
2972 }
2973
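/*
 * NAT-T mapping change notification: only meaningful for ESP states that
 * use UDP encapsulation, hence the IPPROTO_ESP and x->encap checks below.
 */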
2974 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2975 __be16 sport)
2976 {
2977 struct net *net = xs_net(x);
2978 struct sk_buff *skb;
2979
2980 if (x->id.proto != IPPROTO_ESP)
2981 return -EINVAL;
2982
2983 if (!x->encap)
2984 return -EINVAL;
2985
2986 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
2987 if (skb == NULL)
2988 return -ENOMEM;
2989
2990 if (build_mapping(skb, x, ipaddr, sport) < 0)
2991 BUG();
2992
2993 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
2994 }
2995
2996 static struct xfrm_mgr netlink_mgr = {
2997 .id = "netlink",
2998 .notify = xfrm_send_state_notify,
2999 .acquire = xfrm_send_acquire,
3000 .compile_policy = xfrm_compile_policy,
3001 .notify_policy = xfrm_send_policy_notify,
3002 .report = xfrm_send_report,
3003 .migrate = xfrm_send_migrate,
3004 .new_mapping = xfrm_send_mapping,
3005 };
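/*
 * Registered with the xfrm core as a key manager (see xfrm_user_init()),
 * so state and policy events are delivered to the NETLINK_XFRM multicast
 * groups via the callbacks above.
 */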
3006
3007 static int __net_init xfrm_user_net_init(struct net *net)
3008 {
3009 struct sock *nlsk;
3010 struct netlink_kernel_cfg cfg = {
3011 .groups = XFRMNLGRP_MAX,
3012 .input = xfrm_netlink_rcv,
3013 };
3014
3015 nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
3016 if (nlsk == NULL)
3017 return -ENOMEM;
3018 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
3019 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
3020 return 0;
3021 }
3022
3023 static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
3024 {
3025 struct net *net;
3026 list_for_each_entry(net, net_exit_list, exit_list)
3027 RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
3028 synchronize_net();
3029 list_for_each_entry(net, net_exit_list, exit_list)
3030 netlink_kernel_release(net->xfrm.nlsk_stash);
3031 }
3032
3033 static struct pernet_operations xfrm_user_net_ops = {
3034 .init = xfrm_user_net_init,
3035 .exit_batch = xfrm_user_net_exit,
3036 };
3037
3038 static int __init xfrm_user_init(void)
3039 {
3040 int rv;
3041
3042 printk(KERN_INFO "Initializing XFRM netlink socket\n");
3043
3044 rv = register_pernet_subsys(&xfrm_user_net_ops);
3045 if (rv < 0)
3046 return rv;
3047 rv = xfrm_register_km(&netlink_mgr);
3048 if (rv < 0)
3049 unregister_pernet_subsys(&xfrm_user_net_ops);
3050 return rv;
3051 }
3052
3053 static void __exit xfrm_user_exit(void)
3054 {
3055 xfrm_unregister_km(&netlink_mgr);
3056 unregister_pernet_subsys(&xfrm_user_net_ops);
3057 }
3058
3059 module_init(xfrm_user_init);
3060 module_exit(xfrm_user_exit);
3061 MODULE_LICENSE("GPL");
3062 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
3063