/*
 *  linux/net/sunrpc/auth_gss/auth_gss.c
 *
 *  RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

struct gss_auth {
	struct kref kref;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct rpc_pipe *pipe[2];
};
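
/*
 * Illustrative sketch (not part of the original code): with rpc_pipefs
 * mounted in its usual place, the two upcall pipes for an NFS client
 * typically show up as something like
 *
 *	/var/lib/nfs/rpc_pipefs/nfs/clnt0/krb5	(pipe[0], legacy v0 format)
 *	/var/lib/nfs/rpc_pipefs/nfs/clnt0/gssd	(pipe[1], text-based v1 format)
 *
 * The exact mount point and client directory name depend on the
 * distribution and on which client created the pipes.
 */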

/* pipe_version >= 0 if and only if someone has a pipe open. */
static int pipe_version = -1;
static atomic_t pipe_users = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}
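
/*
 * Hedged example of the layout simple_get_netobj() expects: a 4-byte
 * length in host byte order followed by that many bytes of opaque
 * data, e.g. { 0x03, 0x00, 0x00, 0x00, 'a', 'b', 'c' } for a 3-byte
 * object on a little-endian host.  The length is read with
 * simple_get_bytes(), so no byte-swapping is applied here; the data
 * comes from gssd over a local pipe, not off the wire.
 */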

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count,1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
		__func__, ctx->gc_expiry, now, timeout);
	return q;
err:
	dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
	return p;
}
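
/*
 * Rough sketch of the downcall buffer that gss_fill_context() parses
 * (all integers in host byte order, as written by gssd):
 *
 *	u32 lifetime (seconds) | u32 window size |
 *	wire context handle (u32 len + data)     |
 *	u32 seclen + exported security context blob
 *
 * A window size of zero is the error path: the next u32 is then an
 * errno value rather than a context.  The uid that precedes all of
 * this has already been consumed by gss_pipe_downcall().
 */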

#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	kuid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

static int get_pipe_version(void)
{
	int ret;

	spin_lock(&pipe_version_lock);
	if (pipe_version >= 0) {
		atomic_inc(&pipe_users);
		ret = pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(void)
{
	if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
		pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}
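
/*
 * pipe_version lifecycle, for reference: the first gss_pipe_open()
 * fixes the version (0 or 1) and wakes any waiters; each upcall takes
 * a reference via get_pipe_version(); when the last user (upcall or
 * open pipe) drops its reference through put_pipe_version(), the
 * version reverts to -1 so a differently-versioned gssd can take over.
 */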

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version();
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: %s found msg %p\n", __func__, pos);
		return pos;
	}
	dprintk("RPC: %s found nothing\n", __func__);
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}

static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}

static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
	memcpy(gss_msg->databuf, &uid, sizeof(uid));
	gss_msg->msg.data = gss_msg->databuf;
	gss_msg->msg.len = sizeof(uid);
	BUG_ON(sizeof(uid) > UPCALL_BUF_LEN);
}
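
/*
 * The legacy (v0) upcall message is nothing more than the raw uid_t
 * above, in host byte order; everything else (mechanism, service,
 * target) is implied by which pipe the message arrives on.
 */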

static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				struct rpc_clnt *clnt,
				const char *service_name)
{
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	int len = 0;

	gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
				   mech->gm_name,
				   from_kuid(&init_user_ns, gss_msg->uid));
	p += gss_msg->msg.len;
	if (clnt->cl_principal) {
		len = sprintf(p, "target=%s ", clnt->cl_principal);
		p += len;
		gss_msg->msg.len += len;
	}
	if (service_name != NULL) {
		len = sprintf(p, "service=%s ", service_name);
		p += len;
		gss_msg->msg.len += len;
	}
	if (mech->gm_upcall_enctypes) {
		len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
		p += len;
		gss_msg->msg.len += len;
	}
	len = sprintf(p, "\n");
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
}
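
/*
 * Illustrative example (values made up) of a v1 upcall line as built
 * above, for a krb5 mount with a target principal and a service name:
 *
 *	"mech=krb5 uid=1000 target=nfs@server.example.com service=nfs enctypes=18,17,16,23,3,1,2 \n"
 *
 * The trailing space before the newline comes from the individual
 * sprintf() calls, each of which appends "key=value ".
 */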

static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
			   struct rpc_clnt *clnt,
			   const char *service_name)
{
	if (pipe_version == 0)
		gss_encode_v0_msg(gss_msg);
	else /* pipe_version == 1 */
		gss_encode_v1_msg(gss_msg, clnt, service_name);
}

static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
	      kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		return ERR_PTR(-ENOMEM);
	vers = get_pipe_version();
	if (vers < 0) {
		kfree(gss_msg);
		return ERR_PTR(vers);
	}
	gss_msg->pipe = gss_auth->pipe[vers];
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	gss_encode_msg(gss_msg, clnt, service_name);
	return gss_msg;
}

static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, clnt, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

static void warn_gssd(void)
{
	static unsigned long ratelimit;
	unsigned long now = jiffies;

	if (time_after(now, ratelimit)) {
		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
				"Please check user daemon is running.\n");
		ratelimit = now + 15*HZ;
	}
}

static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	dprintk("RPC: %5u %s for uid %u\n",
		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		return -EAGAIN;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u %s for uid %u result %d\n",
		task->tk_pid, __func__,
		from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err = 0;

	dprintk("RPC: %s for uid %u\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				pipe_version >= 0, 15*HZ);
		if (pipe_version < 0) {
			warn_gssd();
			err = -EACCES;
		}
		if (err)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %s for uid %u result %d\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
	struct gss_cl_ctx *ctx;
	uid_t id;
	kuid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &id, sizeof(id));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	uid = make_kuid(&init_user_ns, id);
	if (!uid_valid(uid)) {
		err = -EINVAL;
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = __gss_find_upcall(pipe, uid);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from "
				"gss_fill_context: %zd\n", __func__, err);
			BUG();
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: %s returning %Zd\n", __func__, err);
	return err;
}
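
/*
 * Sketch of the complete downcall flow handled above, assuming a
 * successful gssd reply: copy the buffer from userspace, read the
 * uid, find the matching upcall on pipe->in_downcall, fill a fresh
 * gss_cl_ctx from the remaining bytes, attach it to the upcall with
 * gss_get_ctx(), and wake the waiting RPC task(s) when the message
 * is unhashed.  Returning mlen tells rpc_pipefs the whole write was
 * consumed.
 */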

static int gss_pipe_open(struct inode *inode, int new_version)
{
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;

}

static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}

static void
gss_pipe_release(struct inode *inode)
{
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {

		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version();
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: %s releasing msg %p\n",
			__func__, gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
}

static void gss_pipes_dentries_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	if (gss_auth->pipe[0]->dentry)
		rpc_unlink(gss_auth->pipe[0]->dentry);
	if (gss_auth->pipe[1]->dentry)
		rpc_unlink(gss_auth->pipe[1]->dentry);
}

static int gss_pipes_dentries_create(struct rpc_auth *auth)
{
	int err;
	struct gss_auth *gss_auth;
	struct rpc_clnt *clnt;

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	clnt = gss_auth->client;

	gss_auth->pipe[1]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
						      "gssd",
						      clnt, gss_auth->pipe[1]);
	if (IS_ERR(gss_auth->pipe[1]->dentry))
		return PTR_ERR(gss_auth->pipe[1]->dentry);
	gss_auth->pipe[0]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
						      gss_auth->mech->gm_name,
						      clnt, gss_auth->pipe[0]);
	if (IS_ERR(gss_auth->pipe[0]->dentry)) {
		err = PTR_ERR(gss_auth->pipe[0]->dentry);
		goto err_unlink_pipe_1;
	}
	return 0;

err_unlink_pipe_1:
	rpc_unlink(gss_auth->pipe[1]->dentry);
	return err;
}

static void gss_pipes_dentries_destroy_net(struct rpc_clnt *clnt,
					   struct rpc_auth *auth)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *sb;

	sb = rpc_get_sb_net(net);
	if (sb) {
		if (clnt->cl_dentry)
			gss_pipes_dentries_destroy(auth);
		rpc_put_sb_net(net);
	}
}

static int gss_pipes_dentries_create_net(struct rpc_clnt *clnt,
					 struct rpc_auth *auth)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *sb;
	int err = 0;

	sb = rpc_get_sb_net(net);
	if (sb) {
		if (clnt->cl_dentry)
			err = gss_pipes_dentries_create(auth);
		rpc_put_sb_net(net);
	}
	return err;
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct gss_auth *gss_auth;
	struct rpc_auth *auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	gss_auth->client = clnt;
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
		goto err_free;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_auth->pipe[1] = rpc_mkpipe_data(&gss_upcall_ops_v1,
					    RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->pipe[1])) {
		err = PTR_ERR(gss_auth->pipe[1]);
		goto err_put_mech;
	}

	gss_auth->pipe[0] = rpc_mkpipe_data(&gss_upcall_ops_v0,
					    RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->pipe[0])) {
		err = PTR_ERR(gss_auth->pipe[0]);
		goto err_destroy_pipe_1;
	}
	err = gss_pipes_dentries_create_net(clnt, auth);
	if (err)
		goto err_destroy_pipe_0;
	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_unlink_pipes;

	return auth;
err_unlink_pipes:
	gss_pipes_dentries_destroy_net(clnt, auth);
err_destroy_pipe_0:
	rpc_destroy_pipe_data(gss_auth->pipe[0]);
err_destroy_pipe_1:
	rpc_destroy_pipe_data(gss_auth->pipe[1]);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_free:
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

static void
gss_free(struct gss_auth *gss_auth)
{
	gss_pipes_dentries_destroy_net(gss_auth->client, &gss_auth->rpc_auth);
	rpc_destroy_pipe_data(gss_auth->pipe[0]);
	rpc_destroy_pipe_data(gss_auth->pipe[1]);
	gss_mech_put(gss_auth->mech);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
		auth, auth->au_flavor);

	rpcauth_destroy_credcache(auth);

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct rpc_task *task;

	if (gss_cred->gc_ctx == NULL ||
	    test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: %s\n", __func__);

	gss_delete_sec_context(&ctx->gc_gss_ctx);
	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: %s cred=%p\n", __func__, gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}

static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	kref_put(&gss_auth->kref, gss_free_callback);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{

	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: %s for uid %d, flavor %d\n",
		__func__, from_kuid(&init_user_ns, acred->uid),
		auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: %s failed with error %d\n", __func__, err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		return strcmp(acred->principal, gss_cred->gc_principal) == 0;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	return uid_eq(rc->cr_uid, acred->uid);
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *cred_len;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor */
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}
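
/*
 * Resulting on-the-wire layout (informal sketch): the credential is
 *
 *	AUTH_GSS | cred length | RPC_GSS_VERSION | gc_proc | seq no |
 *	service | wire context handle (netobj)
 *
 * and the verifier that follows is AUTH_GSS plus a MIC computed over
 * everything from the xid up to the end of the credential.
 */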

static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.principal = gss_cred->gc_principal,
		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_rqstp->rq_cred = new;
	put_rpccred(oldcred);
	return 0;
}

static int gss_cred_is_negative_entry(struct rpc_cred *cred)
{
	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
		unsigned long now = jiffies;
		unsigned long begin, expire;
		struct gss_cred *gss_cred;

		gss_cred = container_of(cred, struct gss_cred, gc_base);
		begin = gss_cred->gc_upcall_timestamp;
		expire = begin + gss_expired_cred_retry_delay * HZ;

		if (time_in_range_open(now, begin, expire))
			return 1;
	}
	return 0;
}

/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	int ret = 0;

	if (gss_cred_is_negative_entry(cred))
		return -EKEYEXPIRED;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
	    !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_rqstp->rq_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return -EACCES;
}

static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 seq;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav,len;
	u32 maj_stat;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
			task->tk_pid, __func__, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
		task->tk_pid, __func__);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s failed.\n", task->tk_pid, __func__);
	return NULL;
}

static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
				__be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
	encode(rqstp, &xdr, obj);
}

static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		   kxdreproc_t encode, struct rpc_rqst *rqstp,
		   __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	__be32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset;
	__be32 *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}
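
/*
 * Informal sketch of an integrity-protected request body as built
 * above:
 *
 *	u32 integ_len | u32 seq no | encoded arguments | MIC (netobj)
 *
 * where the MIC covers the seq no plus the arguments, matching the
 * rpc_gss_integ_data layout described in RFC 2203.
 */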

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i=0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	rqstp->rq_enc_pages_num = i;
	priv_release_snd_buf(rqstp);
out:
	return -EAGAIN;
}

static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		  kxdreproc_t encode, struct rpc_rqst *rqstp,
		  __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	__be32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/*
	 * Give the tail its own page, in case we need extra space in the
	 * head when wrapping:
	 *
	 * call_allocate() allocates twice the slack space required
	 * by the authentication flavor to rq_callsize.
	 * For GSS, slack is GSS_CRED_SLACK.
	 */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* slack space should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}
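
/*
 * Informal sketch of a privacy-protected request body: a single
 * opaque field (u32 length + data) whose cleartext is the seq no
 * followed by the encoded arguments, encrypted in place by
 * gss_wrap(); the zero padding added at the end keeps the opaque
 * field a multiple of four bytes, as XDR requires.
 */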

static int
gss_wrap_req(struct rpc_task *task,
	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		goto out;
	}
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
		break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
	return status;
}

static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		      struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		     struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}
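
/*
 * The reply-side checks mirror the request wrapping: for integrity
 * the MIC is verified over (seq no + results) and the seq no must
 * match rq_seqno; for privacy the opaque blob is unwrapped in place
 * and the decrypted seq no is checked afterwards.  Either failure is
 * reported to the caller as -EIO.
 */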

static int
gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
		      __be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	return decode(rqstp, &xdr, obj);
}

static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *savedp = p;
	struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int savedlen = head->iov_len;
	int status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n",
		task->tk_pid, __func__, status);
	return status;
}

static const struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
	.au_name	= "RPCSEC_GSS",
	.create		= gss_create,
	.destroy	= gss_destroy,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred,
	.pipes_create	= gss_pipes_dentries_create,
	.pipes_destroy	= gss_pipes_dentries_destroy,
	.list_pseudoflavors = gss_mech_list_pseudoflavors,
	.info2flavor	= gss_mech_info2flavor,
	.flavor2info	= gss_mech_flavor2info,
};

static const struct rpc_credops gss_credops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.cr_init	= gss_cred_init,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static const struct rpc_credops gss_nullops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_nullcred,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh_null,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v0,
	.release_pipe	= gss_pipe_release,
};

static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v1,
	.release_pipe	= gss_pipe_release,
};

static __net_init int rpcsec_gss_init_net(struct net *net)
{
	return gss_svc_init_net(net);
}

static __net_exit void rpcsec_gss_exit_net(struct net *net)
{
	gss_svc_shutdown_net(net);
}

static struct pernet_operations rpcsec_gss_net_ops = {
	.init = rpcsec_gss_init_net,
	.exit = rpcsec_gss_exit_net,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	err = register_pernet_subsys(&rpcsec_gss_net_ops);
	if (err)
		goto out_svc_exit;
	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
	return 0;
out_svc_exit:
	gss_svc_shutdown();
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	unregister_pernet_subsys(&rpcsec_gss_net_ops);
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");
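
/*
 * Runtime tuning example (hypothetical values): when this code is
 * built as the auth_rpcgss module, the retry delay can be set at load
 * time with "modprobe auth_rpcgss expired_cred_retry_delay=10" or
 * adjusted later through
 * /sys/module/auth_rpcgss/parameters/expired_cred_retry_delay.
 */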

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)