/*
 * net/sunrpc/auth_gss/gss_krb5_crypto.c
 *
 * (imported in the course of the "include cleanup" series: update gfp.h
 * and slab.h includes to prepare for breaking implicit slab.h inclusion)
 */
/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
36
37 #include <linux/err.h>
38 #include <linux/types.h>
39 #include <linux/mm.h>
40 #include <linux/scatterlist.h>
41 #include <linux/crypto.h>
42 #include <linux/highmem.h>
43 #include <linux/pagemap.h>
44 #include <linux/sunrpc/gss_krb5.h>
45 #include <linux/sunrpc/xdr.h>
46
47 #ifdef RPC_DEBUG
48 # define RPCDBG_FACILITY RPCDBG_AUTH
49 #endif
50
51 u32
52 krb5_encrypt(
53 struct crypto_blkcipher *tfm,
54 void * iv,
55 void * in,
56 void * out,
57 int length)
58 {
59 u32 ret = -EINVAL;
60 struct scatterlist sg[1];
61 u8 local_iv[16] = {0};
62 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
63
64 if (length % crypto_blkcipher_blocksize(tfm) != 0)
65 goto out;
66
67 if (crypto_blkcipher_ivsize(tfm) > 16) {
68 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
69 crypto_blkcipher_ivsize(tfm));
70 goto out;
71 }
72
73 if (iv)
74 memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
75
76 memcpy(out, in, length);
77 sg_init_one(sg, out, length);
78
79 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
80 out:
81 dprintk("RPC: krb5_encrypt returns %d\n", ret);
82 return ret;
83 }
84
85 u32
86 krb5_decrypt(
87 struct crypto_blkcipher *tfm,
88 void * iv,
89 void * in,
90 void * out,
91 int length)
92 {
93 u32 ret = -EINVAL;
94 struct scatterlist sg[1];
95 u8 local_iv[16] = {0};
96 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
97
98 if (length % crypto_blkcipher_blocksize(tfm) != 0)
99 goto out;
100
101 if (crypto_blkcipher_ivsize(tfm) > 16) {
102 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
103 crypto_blkcipher_ivsize(tfm));
104 goto out;
105 }
106 if (iv)
107 memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
108
109 memcpy(out, in, length);
110 sg_init_one(sg, out, length);
111
112 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
113 out:
114 dprintk("RPC: gss_k5decrypt returns %d\n",ret);
115 return ret;
116 }
117
118 static int
119 checksummer(struct scatterlist *sg, void *data)
120 {
121 struct hash_desc *desc = data;
122
123 return crypto_hash_update(desc, sg, sg->length);
124 }
125
126 /* checksum the plaintext data and hdrlen bytes of the token header */
127 s32
128 make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
129 int body_offset, struct xdr_netobj *cksum)
130 {
131 struct hash_desc desc; /* XXX add to ctx? */
132 struct scatterlist sg[1];
133 int err;
134
135 desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
136 if (IS_ERR(desc.tfm))
137 return GSS_S_FAILURE;
138 cksum->len = crypto_hash_digestsize(desc.tfm);
139 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
140
141 err = crypto_hash_init(&desc);
142 if (err)
143 goto out;
144 sg_init_one(sg, header, hdrlen);
145 err = crypto_hash_update(&desc, sg, hdrlen);
146 if (err)
147 goto out;
148 err = xdr_process_buf(body, body_offset, body->len - body_offset,
149 checksummer, &desc);
150 if (err)
151 goto out;
152 err = crypto_hash_final(&desc, cksum->data);
153
154 out:
155 crypto_free_hash(desc.tfm);
156 return err ? GSS_S_FAILURE : 0;
157 }
158
/*
 * Per-call state threaded through encryptor() by xdr_process_buf():
 * accumulates scatterlist fragments until a whole number of cipher
 * blocks is available to encrypt.
 */
struct encryptor_desc {
	u8 iv[8];			/* XXX hard-coded blocksize */
	struct blkcipher_desc desc;	/* cipher handle + CBC chaining IV */
	int pos;			/* byte offset reached in the xdr_buf */
	struct xdr_buf *outbuf;		/* destination buffer (ciphertext) */
	struct page **pages;		/* caller's plaintext source pages */
	struct scatterlist infrags[4];	/* pending input fragments */
	struct scatterlist outfrags[4];	/* pending output fragments */
	int fragno;			/* fragments accumulated so far */
	int fraglen;			/* bytes accumulated, not yet ciphered */
};
170
/*
 * xdr_process_buf() callback: encrypt the xdr_buf one scatterlist
 * fragment at a time.
 *
 * Fragments are batched into infrags[]/outfrags[] until a whole number
 * of 8-byte cipher blocks has accumulated; that much is then encrypted
 * and any sub-block remainder is carried over into the next batch, so
 * the cipher only ever sees block-aligned lengths.  The CBC IV in
 * desc->desc.info chains state across calls.
 */
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	/* Within the page-data region, the plaintext lives in the
	 * caller-supplied pages, not in the output buf's own pages. */
	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		/* head/tail iovecs are encrypted in place */
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	/* Hold back any sub-block tail; encrypt whole blocks only. */
	fraglen = thislen & 7;		/* XXX hardcoded blocksize */
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		/* Carry the unencrypted remainder into the next batch,
		 * keeping the distinct input page for the in-fragment. */
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
231
232 int
233 gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
234 int offset, struct page **pages)
235 {
236 int ret;
237 struct encryptor_desc desc;
238
239 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
240
241 memset(desc.iv, 0, sizeof(desc.iv));
242 desc.desc.tfm = tfm;
243 desc.desc.info = desc.iv;
244 desc.desc.flags = 0;
245 desc.pos = offset;
246 desc.outbuf = buf;
247 desc.pages = pages;
248 desc.fragno = 0;
249 desc.fraglen = 0;
250
251 sg_init_table(desc.infrags, 4);
252 sg_init_table(desc.outfrags, 4);
253
254 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
255 return ret;
256 }
257
/*
 * Per-call state threaded through decryptor() by xdr_process_buf().
 * Decryption is in place, so a single fragment array suffices.
 */
struct decryptor_desc {
	u8 iv[8];			/* XXX hard-coded blocksize */
	struct blkcipher_desc desc;	/* cipher handle + CBC chaining IV */
	struct scatterlist frags[4];	/* pending fragments (in == out) */
	int fragno;			/* fragments accumulated so far */
	int fraglen;			/* bytes accumulated, not yet ciphered */
};
265
/*
 * xdr_process_buf() callback: decrypt the xdr_buf in place, one
 * scatterlist fragment at a time.
 *
 * Mirrors encryptor(): fragments accumulate in frags[] until a whole
 * number of 8-byte cipher blocks is available, that much is decrypted,
 * and any sub-block remainder carries over to the next batch.
 */
static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	/* Hold back any sub-block tail; decrypt whole blocks only. */
	fraglen = thislen & 7;		/* XXX hardcoded blocksize */
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		/* Carry the undecrypted remainder into the next batch. */
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
307
308 int
309 gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
310 int offset)
311 {
312 struct decryptor_desc desc;
313
314 /* XXXJBF: */
315 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
316
317 memset(desc.iv, 0, sizeof(desc.iv));
318 desc.desc.tfm = tfm;
319 desc.desc.info = desc.iv;
320 desc.desc.flags = 0;
321 desc.fragno = 0;
322 desc.fraglen = 0;
323
324 sg_init_table(desc.frags, 4);
325
326 return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
327 }