/* net/sunrpc/svcauth_unix.c */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#define RPCDBG_FACILITY RPCDBG_AUTH


#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2), i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */


struct unix_domain {
        struct auth_domain      h;
        /* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
        struct unix_domain *ud = container_of(dom, struct unix_domain, h);

        kfree(dom->name);
        kfree(ud);
}

struct auth_domain *unix_domain_find(char *name)
{
        struct auth_domain *rv;
        struct unix_domain *new = NULL;

        rv = auth_domain_lookup(name, NULL);
        while (1) {
                if (rv) {
                        if (new && rv != &new->h)
                                svcauth_unix_domain_release(&new->h);

                        if (rv->flavour != &svcauth_unix) {
                                auth_domain_put(rv);
                                return NULL;
                        }
                        return rv;
                }

                new = kmalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL)
                        return NULL;
                kref_init(&new->h.ref);
                new->h.name = kstrdup(name, GFP_KERNEL);
                if (new->h.name == NULL) {
                        kfree(new);
                        return NULL;
                }
                new->h.flavour = &svcauth_unix;
                rv = auth_domain_lookup(name, &new->h);
        }
}
EXPORT_SYMBOL_GPL(unix_domain_find);
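
/*
 * For illustration: a typical caller (for example, export-table code)
 * maps a client name to an auth domain and drops the reference when it
 * is done with it.  The name "example.com" here is hypothetical.
 *
 *	struct auth_domain *dom = unix_domain_find("example.com");
 *
 *	if (dom) {
 *		... associate state with dom ...
 *		auth_domain_put(dom);
 *	}
 */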


/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define IP_HASHBITS     8
#define IP_HASHMAX      (1<<IP_HASHBITS)

struct ip_map {
        struct cache_head       h;
        char                    m_class[8]; /* e.g. "nfsd" */
        struct in6_addr         m_addr;
        struct unix_domain      *m_client;
};

static void ip_map_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct ip_map *im = container_of(item, struct ip_map, h);

        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                auth_domain_put(&im->m_client->h);
        kfree(im);
}

static inline int hash_ip6(const struct in6_addr *ip)
{
        return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct ip_map *orig = container_of(corig, struct ip_map, h);
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        return strcmp(orig->m_class, new->m_class) == 0 &&
               ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        struct ip_map *item = container_of(citem, struct ip_map, h);

        strcpy(new->m_class, item->m_class);
        new->m_addr = item->m_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        struct ip_map *item = container_of(citem, struct ip_map, h);

        kref_get(&item->m_client->h.ref);
        new->m_client = item->m_client;
}
static struct cache_head *ip_map_alloc(void)
{
        struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
        if (i)
                return &i->h;
        else
                return NULL;
}

static void ip_map_request(struct cache_detail *cd,
                           struct cache_head *h,
                           char **bpp, int *blen)
{
        char text_addr[40];
        struct ip_map *im = container_of(h, struct ip_map, h);

        if (ipv6_addr_v4mapped(&(im->m_addr))) {
                snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
        } else {
                snprintf(text_addr, 40, "%pI6", &im->m_addr);
        }
        qword_add(bpp, blen, im->m_class);
        qword_add(bpp, blen, text_addr);
        (*bpp)[-1] = '\n';
}
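
/*
 * For illustration: for the "nfsd" class and the (hypothetical) client
 * address 192.0.2.1, the upcall built above is the single line
 *
 *	nfsd 192.0.2.1
 *
 * written to this cache's channel file (normally
 * /proc/net/rpc/auth.unix.ip/channel) for a user-space helper such as
 * mountd to answer.
 */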

static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);

static int ip_map_parse(struct cache_detail *cd,
                        char *mesg, int mlen)
{
        /* class ipaddress expiry [domainname] */
        /* should be safe just to use the start of the input buffer
         * for scratch: */
        char *buf = mesg;
        int len;
        char class[8];
        union {
                struct sockaddr         sa;
                struct sockaddr_in      s4;
                struct sockaddr_in6     s6;
        } address;
        struct sockaddr_in6 sin6;
        int err;

        struct ip_map *ipmp;
        struct auth_domain *dom;
        time_t expiry;

        if (mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        /* class */
        len = qword_get(&mesg, class, sizeof(class));
        if (len <= 0) return -EINVAL;

        /* ip address */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0) return -EINVAL;

        if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
                return -EINVAL;
        switch (address.sa.sa_family) {
        case AF_INET:
                /* Form a mapped IPv4 address in sin6 */
                sin6.sin6_family = AF_INET6;
                ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
                                       &sin6.sin6_addr);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                memcpy(&sin6, &address.s6, sizeof(sin6));
                break;
#endif
        default:
                return -EINVAL;
        }

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        /* domainname, or empty for NEGATIVE */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) return -EINVAL;

        if (len) {
                dom = unix_domain_find(buf);
                if (dom == NULL)
                        return -ENOENT;
        } else
                dom = NULL;

        /* IPv6 scope IDs are ignored for now */
        ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
        if (ipmp) {
                err = __ip_map_update(cd, ipmp,
                                      container_of(dom, struct unix_domain, h),
                                      expiry);
        } else
                err = -ENOMEM;

        if (dom)
                auth_domain_put(dom);

        cache_flush();
        return err;
}
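
/*
 * For illustration: a reply (downcall) from user space is one line in the
 * "class ipaddress expiry [domainname]" format parsed above, e.g. with
 * hypothetical values
 *
 *	nfsd 192.0.2.1 1893456000 example.com
 *
 * where the expiry is absolute, in seconds since the epoch.  Leaving the
 * domain name out makes the entry NEGATIVE: the address is known, but it
 * maps to no client.
 */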

static int ip_map_show(struct seq_file *m,
                       struct cache_detail *cd,
                       struct cache_head *h)
{
        struct ip_map *im;
        struct in6_addr addr;
        char *dom = "-no-domain-";

        if (h == NULL) {
                seq_puts(m, "#class IP domain\n");
                return 0;
        }
        im = container_of(h, struct ip_map, h);
        /* class addr domain */
        addr = im->m_addr;

        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                dom = im->m_client->h.name;

        if (ipv6_addr_v4mapped(&addr)) {
                seq_printf(m, "%s %pI4 %s\n",
                           im->m_class, &addr.s6_addr32[3], dom);
        } else {
                seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
        }
        return 0;
}
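
/*
 * For illustration: with the hypothetical entry above, reading this
 * cache's content file shows
 *
 *	#class IP domain
 *	nfsd 192.0.2.1 example.com
 */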


static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
                struct in6_addr *addr)
{
        struct ip_map ip;
        struct cache_head *ch;

        strcpy(ip.m_class, class);
        ip.m_addr = *addr;
        ch = sunrpc_cache_lookup(cd, &ip.h,
                                 hash_str(class, IP_HASHBITS) ^
                                 hash_ip6(addr));

        if (ch)
                return container_of(ch, struct ip_map, h);
        else
                return NULL;
}

static inline struct ip_map *ip_map_lookup(struct net *net, char *class,
                struct in6_addr *addr)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        return __ip_map_lookup(sn->ip_map_cache, class, addr);
}

static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
                struct unix_domain *udom, time_t expiry)
{
        struct ip_map ip;
        struct cache_head *ch;

        ip.m_client = udom;
        ip.h.flags = 0;
        if (!udom)
                set_bit(CACHE_NEGATIVE, &ip.h.flags);
        ip.h.expiry_time = expiry;
        ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
                                 hash_str(ipm->m_class, IP_HASHBITS) ^
                                 hash_ip6(&ipm->m_addr));
        if (!ch)
                return -ENOMEM;
        cache_put(ch, cd);
        return 0;
}

static inline int ip_map_update(struct net *net, struct ip_map *ipm,
                struct unix_domain *udom, time_t expiry)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}

void svcauth_unix_purge(struct net *net)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        cache_purge(sn->ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
        struct ip_map *ipm = NULL;
        struct sunrpc_net *sn;

        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                ipm = xprt->xpt_auth_cache;
                if (ipm != NULL) {
                        sn = net_generic(xprt->xpt_net, sunrpc_net_id);
                        if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
                                /*
                                 * The entry has been invalidated since it was
                                 * remembered, e.g. by a second mount from the
                                 * same IP address.
                                 */
                                xprt->xpt_auth_cache = NULL;
                                spin_unlock(&xprt->xpt_lock);
                                cache_put(&ipm->h, sn->ip_map_cache);
                                return NULL;
                        }
                        cache_get(&ipm->h);
                }
                spin_unlock(&xprt->xpt_lock);
        }
        return ipm;
}

static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                if (xprt->xpt_auth_cache == NULL) {
                        /* newly cached, keep the reference */
                        xprt->xpt_auth_cache = ipm;
                        ipm = NULL;
                }
                spin_unlock(&xprt->xpt_lock);
        }
        if (ipm) {
                struct sunrpc_net *sn;

                sn = net_generic(xprt->xpt_net, sunrpc_net_id);
                cache_put(&ipm->h, sn->ip_map_cache);
        }
}

void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
        struct ip_map *ipm;

        ipm = xpt->xpt_auth_cache;
        if (ipm != NULL) {
                struct sunrpc_net *sn;

                sn = net_generic(xpt->xpt_net, sunrpc_net_id);
                cache_put(&ipm->h, sn->ip_map_cache);
        }
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs, needed because
 * AUTH_UNIX aka AUTH_SYS can carry at most UNX_NGROUPS (16)
 * supplementary groups in the on-the-wire credential
 */
#define GID_HASHBITS    8
#define GID_HASHMAX     (1<<GID_HASHBITS)

struct unix_gid {
        struct cache_head       h;
        kuid_t                  uid;
        struct group_info       *gi;
};

static int unix_gid_hash(kuid_t uid)
{
        return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}

static void unix_gid_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct unix_gid *ug = container_of(item, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                put_group_info(ug->gi);
        kfree(ug);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct unix_gid *orig = container_of(corig, struct unix_gid, h);
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        return uid_eq(orig->uid, new->uid);
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);
        new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);

        get_group_info(item->gi);
        new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
        struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
        if (g)
                return &g->h;
        else
                return NULL;
}

static void unix_gid_request(struct cache_detail *cd,
                             struct cache_head *h,
                             char **bpp, int *blen)
{
        char tuid[20];
        struct unix_gid *ug = container_of(h, struct unix_gid, h);

        snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
        qword_add(bpp, blen, tuid);
        (*bpp)[-1] = '\n';
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);

static int unix_gid_parse(struct cache_detail *cd,
                          char *mesg, int mlen)
{
        /* uid expiry Ngid gid0 gid1 ... gidN-1 */
        int id;
        kuid_t uid;
        int gids;
        int rv;
        int i;
        int err;
        time_t expiry;
        struct unix_gid ug, *ugp;

        if (mesg[mlen - 1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        rv = get_int(&mesg, &id);
        if (rv)
                return -EINVAL;
        uid = make_kuid(&init_user_ns, id);
        ug.uid = uid;

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        rv = get_int(&mesg, &gids);
        if (rv || gids < 0 || gids > 8192)
                return -EINVAL;

        ug.gi = groups_alloc(gids);
        if (!ug.gi)
                return -ENOMEM;

        for (i = 0 ; i < gids ; i++) {
                int gid;
                kgid_t kgid;
                rv = get_int(&mesg, &gid);
                err = -EINVAL;
                if (rv)
                        goto out;
                kgid = make_kgid(&init_user_ns, gid);
                if (!gid_valid(kgid))
                        goto out;
                ug.gi->gid[i] = kgid;
        }

        groups_sort(ug.gi);
        ugp = unix_gid_lookup(cd, uid);
        if (ugp) {
                struct cache_head *ch;
                ug.h.flags = 0;
                ug.h.expiry_time = expiry;
                ch = sunrpc_cache_update(cd,
                                         &ug.h, &ugp->h,
                                         unix_gid_hash(uid));
                if (!ch)
                        err = -ENOMEM;
                else {
                        err = 0;
                        cache_put(ch, cd);
                }
        } else
                err = -ENOMEM;
 out:
        if (ug.gi)
                put_group_info(ug.gi);
        return err;
}
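
/*
 * For illustration: a downcall in the "uid expiry Ngid gid0 ... gidN-1"
 * format parsed above, written to the cache channel (normally
 * /proc/net/rpc/auth.unix.gid/channel), with hypothetical values for
 * uid 1000 and three supplementary groups:
 *
 *	1000 1893456000 3 27 100 1000
 */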

static int unix_gid_show(struct seq_file *m,
                         struct cache_detail *cd,
                         struct cache_head *h)
{
        struct user_namespace *user_ns = &init_user_ns;
        struct unix_gid *ug;
        int i;
        int glen;

        if (h == NULL) {
                seq_puts(m, "#uid cnt: gids...\n");
                return 0;
        }
        ug = container_of(h, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                glen = ug->gi->ngroups;
        else
                glen = 0;

        seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
        for (i = 0; i < glen; i++)
                seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
        seq_printf(m, "\n");
        return 0;
}
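
/*
 * For illustration: the hypothetical entry above appears in this cache's
 * content file as
 *
 *	#uid cnt: gids...
 *	1000 3: 27 100 1000
 */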

static struct cache_detail unix_gid_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = GID_HASHMAX,
        .name           = "auth.unix.gid",
        .cache_put      = unix_gid_put,
        .cache_request  = unix_gid_request,
        .cache_parse    = unix_gid_parse,
        .cache_show     = unix_gid_show,
        .match          = unix_gid_match,
        .init           = unix_gid_init,
        .update         = unix_gid_update,
        .alloc          = unix_gid_alloc,
};

int unix_gid_cache_create(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd;
        int err;

        cd = cache_create_net(&unix_gid_cache_template, net);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        err = cache_register_net(cd, net);
        if (err) {
                cache_destroy_net(cd, net);
                return err;
        }
        sn->unix_gid_cache = cd;
        return 0;
}

void unix_gid_cache_destroy(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd = sn->unix_gid_cache;

        sn->unix_gid_cache = NULL;
        cache_purge(cd);
        cache_unregister_net(cd, net);
        cache_destroy_net(cd, net);
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
{
        struct unix_gid ug;
        struct cache_head *ch;

        ug.uid = uid;
        ch = sunrpc_cache_lookup(cd, &ug.h, unix_gid_hash(uid));
        if (ch)
                return container_of(ch, struct unix_gid, h);
        else
                return NULL;
}

static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
{
        struct unix_gid *ug;
        struct group_info *gi;
        int ret;
        struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
                                            sunrpc_net_id);

        ug = unix_gid_lookup(sn->unix_gid_cache, uid);
        if (!ug)
                return ERR_PTR(-EAGAIN);
        ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
        switch (ret) {
        case -ENOENT:
                return ERR_PTR(-ENOENT);
        case -ETIMEDOUT:
                return ERR_PTR(-ESHUTDOWN);
        case 0:
                gi = get_group_info(ug->gi);
                cache_put(&ug->h, sn->unix_gid_cache);
                return gi;
        default:
                return ERR_PTR(-EAGAIN);
        }
}

int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
        struct sockaddr_in *sin;
        struct sockaddr_in6 *sin6, sin6_storage;
        struct ip_map *ipm;
        struct group_info *gi;
        struct svc_cred *cred = &rqstp->rq_cred;
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct net *net = xprt->xpt_net;
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        switch (rqstp->rq_addr.ss_family) {
        case AF_INET:
                sin = svc_addr_in(rqstp);
                sin6 = &sin6_storage;
                ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
                break;
        case AF_INET6:
                sin6 = svc_addr_in6(rqstp);
                break;
        default:
                BUG();
        }

        rqstp->rq_client = NULL;
        if (rqstp->rq_proc == 0)
                return SVC_OK;

        ipm = ip_map_cached_get(xprt);
        if (ipm == NULL)
                ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
                                      &sin6->sin6_addr);

        if (ipm == NULL)
                return SVC_DENIED;

        switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
        default:
                BUG();
        case -ETIMEDOUT:
                return SVC_CLOSE;
        case -EAGAIN:
                return SVC_DROP;
        case -ENOENT:
                return SVC_DENIED;
        case 0:
                rqstp->rq_client = &ipm->m_client->h;
                kref_get(&rqstp->rq_client->ref);
                ip_map_cached_put(xprt, ipm);
                break;
        }

        gi = unix_gid_find(cred->cr_uid, rqstp);
        switch (PTR_ERR(gi)) {
        case -EAGAIN:
                return SVC_DROP;
        case -ESHUTDOWN:
                return SVC_CLOSE;
        case -ENOENT:
                break;
        default:
                put_group_info(cred->cr_group_info);
                cred->cr_group_info = gi;
        }
        return SVC_OK;
}

EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec     *argv = &rqstp->rq_arg.head[0];
        struct kvec     *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;

        if (argv->iov_len < 3*4)
                return SVC_GARBAGE;

        if (svc_getu32(argv) != 0) {
                dprintk("svc: bad null cred\n");
                *authp = rpc_autherr_badcred;
                return SVC_DENIED;
        }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                dprintk("svc: bad null verf\n");
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Signal that mapping to nobody uid/gid is required */
        cred->cr_uid = INVALID_UID;
        cred->cr_gid = INVALID_GID;
        cred->cr_group_info = groups_alloc(0);
        if (cred->cr_group_info == NULL)
                return SVC_CLOSE; /* kmalloc failure - client must retry */

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
        return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
        .name           = "null",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_NULL,
        .accept         = svcauth_null_accept,
        .release        = svcauth_null_release,
        .set_client     = svcauth_unix_set_client,
};


static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec     *argv = &rqstp->rq_arg.head[0];
        struct kvec     *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;
        u32             slen, i;
        int             len   = argv->iov_len;

        if ((len -= 3*4) < 0)
                return SVC_GARBAGE;

        svc_getu32(argv);                       /* length */
        svc_getu32(argv);                       /* time stamp */
        slen = XDR_QUADLEN(svc_getnl(argv));    /* machname length */
        if (slen > 64 || (len -= (slen + 3)*4) < 0)
                goto badcred;
        argv->iov_base = (void*)((__be32*)argv->iov_base + slen);      /* skip machname */
        argv->iov_len -= slen*4;
        /*
         * Note: we skip uid_valid()/gid_valid() checks here for
         * backwards compatibility with clients that use -1 id's.
         * Instead, -1 uid or gid is later mapped to the
         * (export-specific) anonymous id by nfsd_setuser.
         * Supplementary gid's will be left alone.
         */
        cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
        cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
        slen = svc_getnl(argv);                 /* gids length */
        if (slen > UNX_NGROUPS || (len -= (slen + 2)*4) < 0)
                goto badcred;
        cred->cr_group_info = groups_alloc(slen);
        if (cred->cr_group_info == NULL)
                return SVC_CLOSE;
        for (i = 0; i < slen; i++) {
                kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
                cred->cr_group_info->gid[i] = kgid;
        }
        groups_sort(cred->cr_group_info);
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
        return SVC_OK;

badcred:
        *authp = rpc_autherr_badcred;
        return SVC_DENIED;
}
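
/*
 * For reference, the AUTH_UNIX (AUTH_SYS) credential body decoded above
 * is laid out as follows (RFC 5531):
 *
 *	unsigned int stamp;
 *	string machinename<255>;
 *	unsigned int uid;
 *	unsigned int gid;
 *	unsigned int gids<16>;		(UNX_NGROUPS)
 *
 * and must be followed by an AUTH_NULL verifier (flavor 0, length 0).
 */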

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
        /* Verifier (such as it is) is already in place.
         */
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0;
}


struct auth_ops svcauth_unix = {
        .name           = "unix",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_UNIX,
        .accept         = svcauth_unix_accept,
        .release        = svcauth_unix_release,
        .domain_release = svcauth_unix_domain_release,
        .set_client     = svcauth_unix_set_client,
};

static struct cache_detail ip_map_cache_template = {
        .owner          = THIS_MODULE,
        .hash_size      = IP_HASHMAX,
        .name           = "auth.unix.ip",
        .cache_put      = ip_map_put,
        .cache_request  = ip_map_request,
        .cache_parse    = ip_map_parse,
        .cache_show     = ip_map_show,
        .match          = ip_map_match,
        .init           = ip_map_init,
        .update         = update,
        .alloc          = ip_map_alloc,
};

int ip_map_cache_create(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd;
        int err;

        cd = cache_create_net(&ip_map_cache_template, net);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        err = cache_register_net(cd, net);
        if (err) {
                cache_destroy_net(cd, net);
                return err;
        }
        sn->ip_map_cache = cd;
        return 0;
}

void ip_map_cache_destroy(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd = sn->ip_map_cache;

        sn->ip_map_cache = NULL;
        cache_purge(cd);
        cache_unregister_net(cd, net);
        cache_destroy_net(cd, net);
}