[NET]: Conversions from kmalloc+memset to k(z|c)alloc.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / sunrpc / auth_gss / auth_gss.c
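The conversion this commit applies replaces a kmalloc() followed by a memset() of the whole allocation with a single kzalloc() call, which returns already-zeroed memory; the result is visible below in gss_alloc_context(), gss_alloc_msg() and gss_create_cred(). A minimal sketch of the pattern, using the context allocation as the example (the exact shape of the pre-conversion code is not shown on this page):

	struct gss_cl_ctx *ctx;

	/* before: allocate, then clear the object by hand */
	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx != NULL)
		memset(ctx, 0, sizeof(*ctx));

	/* after: kzalloc() allocates and zeroes in one step */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
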
/*
 * linux/net/sunrpc/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song <dugsong@monkey.org>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id$
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>

static struct rpc_authops authgss_ops;

static struct rpc_credops gss_credops;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define NFS_NGROUPS	16

#define GSS_CRED_EXPIRE		(60 * HZ)	/* XXX: reasonable? */
#define GSS_CRED_SLACK		1024		/* XXX: unused */
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		56

/* XXX this define must match the gssd define
* as it is passed to gssd to signal the use of
* machine creds should be part of the shared rpc interface */

#define CA_RUN_AS_MACHINE  0x00000200

/* dump the buffer in `emacs-hexl' style */
#define isprint(c)      ((c > 0x1f) && (c < 0x7f))

static DEFINE_RWLOCK(gss_ctx_lock);

struct gss_auth {
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct list_head upcalls;
	struct rpc_clnt *client;
	struct dentry *dentry;
	char path[48];
	spinlock_t lock;
};

static void gss_destroy_ctx(struct gss_cl_ctx *);
static struct rpc_pipe_ops gss_upcall_ops;

void
print_hexl(u32 *p, u_int length, u_int offset)
{
	u_int i, j, jm;
	u8 c, *cp;

	dprintk("RPC: print_hexl: length %d\n", length);
	dprintk("\n");
	cp = (u8 *) p;

	for (i = 0; i < length; i += 0x10) {
		dprintk("  %04x: ", (u_int)(i + offset));
		jm = length - i;
		jm = jm > 16 ? 16 : jm;

		for (j = 0; j < jm; j++) {
			if ((j % 2) == 1)
				dprintk("%02x ", (u_int)cp[i+j]);
			else
				dprintk("%02x", (u_int)cp[i+j]);
		}
		for (; j < 16; j++) {
			if ((j % 2) == 1)
				dprintk("   ");
			else
				dprintk("  ");
		}
		dprintk(" ");

		for (j = 0; j < jm; j++) {
			c = cp[i+j];
			c = isprint(c) ? c : '.';
			dprintk("%c", c);
		}
		dprintk("\n");
	}
}

EXPORT_SYMBOL(print_hexl);

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_destroy_ctx(ctx);
}

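/* Attach a freshly filled context to a credential. The context is reference
 * counted (see gss_get_ctx/gss_put_ctx above); the swap is done under
 * gss_ctx_lock and any previously installed context is released. */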
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *old;
	write_lock(&gss_ctx_lock);
	old = gss_cred->gc_ctx;
	gss_cred->gc_ctx = ctx;
	cred->cr_flags |= RPCAUTH_CRED_UPTODATE;
	cred->cr_flags &= ~RPCAUTH_CRED_NEW;
	write_unlock(&gss_ctx_lock);
	if (old)
		gss_put_ctx(old);
}

static int
gss_cred_is_uptodate_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int res = 0;

	read_lock(&gss_ctx_lock);
	if ((cred->cr_flags & RPCAUTH_CRED_UPTODATE) && gss_cred->gc_ctx)
		res = 1;
	read_unlock(&gss_ctx_lock);
	return res;
}

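/* Helpers for parsing the buffer written back by gssd: each call checks that
 * the requested length stays within [p, end) before copying, so a short or
 * corrupt downcall cannot run the parser off the end of the buffer. */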
static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmalloc(len, GFP_KERNEL);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	memcpy(dest->data, p, len);
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	read_lock(&gss_ctx_lock);
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	read_unlock(&gss_ctx_lock);
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count,1);
	}
	return ctx;
}

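/* Parse the body of a context downcall. The layout, as consumed below, is:
 * credential lifetime in seconds, sequence window size, the opaque wire
 * context handle (as a netobj), and finally the mechanism-specific security
 * context blob handed to gss_import_sec_context(). */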
#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	u32 window_size;
	int ret;

	/* First unsigned int gives the lifetime (in seconds) of the cred */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
	/* Sequence number window. Determines the maximum number of simultaneous requests */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/* in which case, p points to an error code which we ignore */
		p = ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	return q;
err:
	dprintk("RPC: gss_fill_context returning %ld\n", -PTR_ERR(p));
	return p;
}


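/* One upcall message is kept per uid on the gss_auth->upcalls list while a
 * context is being established. RPC tasks and processes interested in the
 * result sleep on rpc_waitqueue/waitqueue and are woken once gssd's downcall
 * fills in ctx or errno. */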
struct gss_upcall_msg {
	atomic_t count;
	uid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
};

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct gss_auth *gss_auth, uid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &gss_auth->upcalls, list) {
		if (pos->uid != uid)
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: gss_find_upcall found msg %p\n", pos);
		return pos;
	}
	dprintk("RPC: gss_find_upcall found nothing\n");
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
{
	struct gss_upcall_msg *old;

	spin_lock(&gss_auth->lock);
	old = __gss_find_upcall(gss_auth, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &gss_auth->upcalls);
	} else
		gss_msg = old;
	spin_unlock(&gss_auth->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	if (list_empty(&gss_msg->list))
		return;
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct gss_auth *gss_auth = gss_msg->auth;

	spin_lock(&gss_auth->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&gss_auth->lock);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;

	BUG_ON(gss_msg == NULL);
	if (gss_msg->ctx)
		gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx));
	else
		task->tk_status = gss_msg->msg.errno;
	spin_lock(&gss_msg->auth->lock);
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	spin_unlock(&gss_msg->auth->lock);
	gss_release_msg(gss_msg);
}

static inline struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
{
	struct gss_upcall_msg *gss_msg;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
	if (gss_msg != NULL) {
		INIT_LIST_HEAD(&gss_msg->list);
		rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
		init_waitqueue_head(&gss_msg->waitqueue);
		atomic_set(&gss_msg->count, 1);
		gss_msg->msg.data = &gss_msg->uid;
		gss_msg->msg.len = sizeof(gss_msg->uid);
		gss_msg->uid = uid;
		gss_msg->auth = gss_auth;
	}
	return gss_msg;
}

static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_upcall_msg *gss_new, *gss_msg;

	gss_new = gss_alloc_msg(gss_auth, cred->cr_uid);
	if (gss_new == NULL)
		return ERR_PTR(-ENOMEM);
	gss_msg = gss_add_msg(gss_auth, gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

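/* Asynchronous path used by RPC tasks: queue (or join) an upcall for this
 * credential's uid and put the task to sleep; gss_upcall_callback() installs
 * the new context when the downcall completes. */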
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_auth *gss_auth = container_of(task->tk_client->cl_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	int err = 0;

	dprintk("RPC: %4u gss_refresh_upcall for uid %u\n", task->tk_pid, cred->cr_uid);
	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	spin_lock(&gss_auth->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL);
	} else
		err = gss_msg->msg.errno;
	spin_unlock(&gss_auth->lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %4u gss_refresh_upcall for uid %u result %d\n", task->tk_pid,
			cred->cr_uid, err);
	return err;
}

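/* Synchronous path used from cr_init: wait (interruptibly) in process
 * context until the upcall for this credential completes or fails. */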
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err = 0;

	dprintk("RPC: gss_upcall for uid %u\n", cred->cr_uid);
	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&gss_auth->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			spin_unlock(&gss_auth->lock);
			break;
		}
		spin_unlock(&gss_auth->lock);
		if (signalled()) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx));
	else
		err = gss_msg->msg.errno;
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: gss_create_upcall for uid %u result %d\n", cred->cr_uid, err);
	return err;
}

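/* rpc_pipefs methods: gss_pipe_upcall copies the pending message (just the
 * uid) out to gssd, and gss_pipe_downcall parses gssd's reply (a uid followed
 * by the fields consumed by gss_fill_context()) and attaches the resulting
 * context to the matching upcall or credential. */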
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
		char __user *dst, size_t buflen)
{
	char *data = (char *)msg->data + msg->copied;
	ssize_t mlen = msg->len;
	ssize_t left;

	if (mlen > buflen)
		mlen = buflen;
	left = copy_to_user(dst, data, mlen);
	if (left < 0) {
		msg->errno = left;
		return left;
	}
	mlen -= left;
	msg->copied += mlen;
	msg->errno = 0;
	return mlen;
}

#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct rpc_clnt *clnt;
	struct gss_auth *gss_auth;
	struct rpc_cred *cred;
	struct gss_upcall_msg *gss_msg;
	struct gss_cl_ctx *ctx;
	uid_t uid;
	int err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_KERNEL);
	if (!buf)
		goto out;

	clnt = RPC_I(filp->f_dentry->d_inode)->private;
	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &uid, sizeof(uid));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;
	err = 0;
	gss_auth = container_of(clnt->cl_auth, struct gss_auth, rpc_auth);
	p = gss_fill_context(p, end, ctx, gss_auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		if (err != -EACCES)
			goto err_put_ctx;
	}
	spin_lock(&gss_auth->lock);
	gss_msg = __gss_find_upcall(gss_auth, uid);
	if (gss_msg) {
		if (err == 0 && gss_msg->ctx == NULL)
			gss_msg->ctx = gss_get_ctx(ctx);
		gss_msg->msg.errno = err;
		__gss_unhash_msg(gss_msg);
		spin_unlock(&gss_auth->lock);
		gss_release_msg(gss_msg);
	} else {
		struct auth_cred acred = { .uid = uid };
		spin_unlock(&gss_auth->lock);
		cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW);
		if (IS_ERR(cred)) {
			err = PTR_ERR(cred);
			goto err_put_ctx;
		}
		gss_cred_set_ctx(cred, gss_get_ctx(ctx));
	}
	gss_put_ctx(ctx);
	kfree(buf);
	dprintk("RPC: gss_pipe_downcall returning length %Zu\n", mlen);
	return mlen;
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: gss_pipe_downcall returning %d\n", err);
	return err;
}

static void
gss_pipe_release(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_clnt *clnt;
	struct rpc_auth *auth;
	struct gss_auth *gss_auth;

	clnt = rpci->private;
	auth = clnt->cl_auth;
	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	spin_lock(&gss_auth->lock);
	while (!list_empty(&gss_auth->upcalls)) {
		struct gss_upcall_msg *gss_msg;

		gss_msg = list_entry(gss_auth->upcalls.next,
				struct gss_upcall_msg, list);
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&gss_auth->lock);
		gss_release_msg(gss_msg);
		spin_lock(&gss_auth->lock);
	}
	spin_unlock(&gss_auth->lock);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
	static unsigned long ratelimit;

	if (msg->errno < 0) {
		dprintk("RPC: gss_pipe_destroy_msg releasing msg %p\n",
				gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT) {
			unsigned long now = jiffies;
			if (time_after(now, ratelimit)) {
				printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
						    "Please check user daemon is running!\n");
				ratelimit = now + 15*HZ;
			}
		}
		gss_release_msg(gss_msg);
	}
}

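/* Create the RPCSEC_GSS authenticator for a client: look up the mechanism
 * and service from the pseudoflavor, initialise the credential cache, and
 * create the rpc_pipefs pipe that gssd uses for upcalls. */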
/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct gss_auth *gss_auth;
	struct rpc_auth * auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n",clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	gss_auth->client = clnt;
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		printk(KERN_WARNING "%s: Pseudoflavor %d not found!",
				__FUNCTION__, flavor);
		goto err_free;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	INIT_LIST_HEAD(&gss_auth->upcalls);
	spin_lock_init(&gss_auth->lock);
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);

	err = rpcauth_init_credcache(auth, GSS_CRED_EXPIRE);
	if (err)
		goto err_put_mech;

	snprintf(gss_auth->path, sizeof(gss_auth->path), "%s/%s",
			clnt->cl_pathname,
			gss_auth->mech->gm_name);
	gss_auth->dentry = rpc_mkpipe(gss_auth->path, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->dentry)) {
		err = PTR_ERR(gss_auth->dentry);
		goto err_put_mech;
	}

	return auth;
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_free:
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
		auth, auth->au_flavor);

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	rpc_unlink(gss_auth->path);
	dput(gss_auth->dentry);
	gss_auth->dentry = NULL;
	gss_mech_put(gss_auth->mech);

	rpcauth_free_credcache(auth);
	kfree(gss_auth);
	module_put(THIS_MODULE);
}

/* gss_destroy_cred (and gss_destroy_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_destroy_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: gss_destroy_ctx\n");

	if (ctx->gc_gss_ctx)
		gss_delete_sec_context(&ctx->gc_gss_ctx);

	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

static void
gss_destroy_cred(struct rpc_cred *rc)
{
	struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base);

	dprintk("RPC: gss_destroy_cred \n");

	if (cred->gc_ctx)
		gss_put_ctx(cred->gc_ctx);
	kfree(cred);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: gss_create_cred for uid %d, flavor %d\n",
			acred->uid, auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
		goto out_err;

	atomic_set(&cred->gc_count, 1);
	cred->gc_uid = acred->uid;
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_flags = 0;
	cred->gc_base.cr_ops = &gss_credops;
	cred->gc_base.cr_flags = RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	return &cred->gc_base;

out_err:
	dprintk("RPC: gss_create_cred failed with error %d\n", err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

	/*
	 * If the searchflags have set RPCAUTH_LOOKUP_NEW, then
	 * we don't really care if the credential has expired or not,
	 * since the caller should be prepared to reinitialise it.
	 */
	if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW))
		goto out;
	/* Don't match with creds that have expired. */
	if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
out:
	return (rc->cr_uid == acred->uid);
}

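/* The credential emitted below is the RPCSEC_GSS header: flavor, length,
 * version, procedure, sequence number, service and the wire context handle.
 * The verifier that follows is a MIC computed over the request from the xid
 * up to the end of that credential. */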
/*
* Marshal credentials.
* Maybe we should keep a cached credential for performance reasons.
*/
static u32 *
gss_marshal(struct rpc_task *task, u32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	u32 *cred_len;
	struct rpc_rqst *req = task->tk_rqstp;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %4u gss_marshal\n", task->tk_pid);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor*/
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}

/*
* Refresh credentials. XXX - finish
*/
static int
gss_refresh(struct rpc_task *task)
{

	if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred))
		return gss_refresh_upcall(task);
	return 0;
}

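/* Check the verifier the server sent back: it must be an RPC_AUTH_GSS
 * verifier whose MIC covers our sequence number for this request. */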
static u32 *
gss_validate(struct rpc_task *task, u32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	u32 seq;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav,len;
	u32 maj_stat;

	dprintk("RPC: %4u gss_validate\n", task->tk_pid);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
	if (maj_stat)
		goto out_bad;
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n",
			task->tk_pid);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %4u gss_validate failed.\n", task->tk_pid);
	return NULL;
}

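/* Integrity service: emit the length and sequence number, XDR-encode the
 * arguments, then append a MIC computed over the sequence number and
 * arguments. */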
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	u32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset, *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	status = encode(rqstp, p, obj);
	if (status)
		return status;

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i=0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	for (i--; i >= 0; i--) {
		__free_page(rqstp->rq_enc_pages[i]);
	}
out:
	return -EAGAIN;
}

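/* Privacy service: XDR-encode the arguments, then encrypt them in place with
 * gss_wrap(). alloc_enc_pages() above supplies the scratch pages that receive
 * the wrapped data, and the result is emitted as one opaque, padded to a
 * 4-byte boundary. */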
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	u32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	status = encode(rqstp, p, obj);
	if (status)
		return status;

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/* Give the tail its own page, in case we need extra space in the
	 * head when wrapping: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* RPC_SLACK_SPACE should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}

static int
gss_wrap_req(struct rpc_task *task,
		kxdrproc_t encode, void *rqstp, u32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %4u gss_wrap_req\n", task->tk_pid);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		status = encode(rqstp, p, obj);
		goto out;
	}
	switch (gss_cred->gc_service) {
		case RPC_GSS_SVC_NONE:
			status = encode(rqstp, p, obj);
			break;
		case RPC_GSS_SVC_INTEGRITY:
			status = gss_wrap_req_integ(cred, ctx, encode,
								rqstp, p, obj);
			break;
		case RPC_GSS_SVC_PRIVACY:
			status = gss_wrap_req_priv(cred, ctx, encode,
					rqstp, p, obj);
			break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %4u gss_wrap_req returning %d\n", task->tk_pid, status);
	return status;
}

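/* Response processing mirrors the wrapping above: for the integrity service
 * verify the MIC over the sequence number and results, and for the privacy
 * service unwrap (decrypt) the opaque body before checking the sequence
 * number. */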
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, u32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, u32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE;
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}


static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrproc_t decode, void *rqstp, u32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	u32 *savedp = p;
	struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int savedlen = head->iov_len;
	int status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
		case RPC_GSS_SVC_NONE:
			break;
		case RPC_GSS_SVC_INTEGRITY:
			status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
			if (status)
				goto out;
			break;
		case RPC_GSS_SVC_PRIVACY:
			status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
			if (status)
				goto out;
			break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = decode(rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %4u gss_unwrap_resp returning %d\n", task->tk_pid,
			status);
	return status;
}

static struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
#ifdef RPC_DEBUG
	.au_name	= "RPCSEC_GSS",
#endif
	.create		= gss_create,
	.destroy	= gss_destroy,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred
};

static struct rpc_credops gss_credops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.cr_init	= gss_cred_init,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static struct rpc_pipe_ops gss_upcall_ops = {
	.upcall		= gss_pipe_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.release_pipe	= gss_pipe_release,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	return 0;
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
}

MODULE_LICENSE("GPL");
module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)