/*
 * linux/net/sunrpc/gss_krb5_crypto.c
 *
 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@umich.edu>
 * Bruce Fields <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government. It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission. FundsXpress makes no representations about the suitability of
 * this software for any purpose. It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif
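
/*
 * krb5_encrypt - CBC-encrypt a contiguous buffer.
 *
 * "in" is copied to "out" and then encrypted with "tfm"; "length" must
 * be a multiple of the cipher blocksize. If "iv" is NULL an all-zero IV
 * is used. Returns 0 on success, nonzero on failure.
 */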
u32
krb5_encrypt(
	struct crypto_blkcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}
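
/*
 * krb5_decrypt - CBC-decrypt a contiguous buffer.
 *
 * Counterpart to krb5_encrypt() above: "in" is copied to "out" and then
 * decrypted with "tfm". The same blocksize and IV rules apply.
 */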
u32
krb5_decrypt(
	struct crypto_blkcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}
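
/*
 * xdr_process_buf() callback: fold one scatterlist fragment into the
 * running hash carried in "data".
 */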
static int
checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *desc = data;

	return crypto_hash_update(desc, sg, sg->length);
}

/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * The checksum is performed over the first 8 bytes of the gss token
 * header and then over the data body.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	checksumlen = crypto_hash_digestsize(desc.tfm);

	if (cksumkey != NULL) {
		err = crypto_hash_setkey(desc.tfm, cksumkey,
					 kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}

/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data body
 * and then over the first 16 octets of the MIC token.
 * Inclusion of the header data in the calculation of the checksum is
 * optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct hash_desc desc;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
				     CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	checksumlen = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		err = crypto_hash_update(&desc, sg, hdrlen);
		if (err)
			goto out;
	}
	err = crypto_hash_final(&desc, checksumdata);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}
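
/*
 * State carried across encryptor() callbacks while walking an xdr_buf:
 * the CBC IV, the cipher descriptor, the current position, and up to
 * four scatterlist fragments awaiting a blocksize-aligned encrypt call.
 */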
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct blkcipher_desc desc;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};
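
/*
 * xdr_process_buf() callback: queue up scatterlist fragments and encrypt
 * them (cleartext page-cache pages in, xdr_buf pages out) whenever a
 * whole number of cipher blocks has accumulated; any sub-block remainder
 * is carried into the next call.
 */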
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail. Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
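
/*
 * In-place CBC encryption of everything in "buf" from "offset" to the
 * end. The region must already be padded to the cipher blocksize;
 * "pages" supplies the cleartext page-cache pages, while the ciphertext
 * lands in buf->pages. A minimal (hypothetical) call from a v1 wrap
 * path might look like:
 *
 *	err = gss_encrypt_xdr_buf(kctx->enc, buf, offset, pages);
 *	if (err)
 *		return GSS_S_FAILURE;
 */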
int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;

	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	return ret;
}
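
/*
 * Decrypt-side counterpart of encryptor_desc: decryption is done in
 * place, so only one set of scatterlist fragments is needed.
 */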
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct blkcipher_desc desc;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};
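
/*
 * xdr_process_buf() callback: accumulate fragments and decrypt in place
 * one whole number of cipher blocks at a time, carrying any sub-block
 * remainder into the next call.
 */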
static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail. Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
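
/*
 * In-place CBC decryption of "buf" from "offset" to the end; the region
 * must be a whole number of cipher blocks.
 */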
int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	struct decryptor_desc desc;

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap(). The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
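
/*
 * Encrypt or decrypt the final (at most two-block) chunk of "buf" with
 * the CTS-capable cipher, using "iv" to chain from the preceding CBC
 * pass. The chunk is bounced through a stack buffer since it may
 * straddle the head/pages/tail boundaries of the xdr_buf.
 */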
static u32
gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
	u8 data[crypto_blkcipher_blocksize(cipher) * 2];
	struct page **save_pages;
	u32 len = buf->len - offset;

	BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	if (encrypt)
		ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
	else
		ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	return ret;
}
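
/*
 * RFC 4121 wrap for the AES enctypes: insert a one-block confounder,
 * append "ec" filler bytes and a copy of the token header to the tail,
 * HMAC the plaintext, then encrypt the bulk with plain CBC (aux_cipher)
 * and the trailing one or two blocks with CTS, chaining the IV across
 * both passes.
 */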
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, int ec, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_blkcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, blocksize))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, blocksize);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
						+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	memset(ecptr, 'X', ec);
	buf->tail[0].iov_len += ec;
	buf->len += ec;

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr + ec, buf->head[0].iov_base + offset,
	       GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt! buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server. Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}
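
/*
 * RFC 4121 unwrap counterpart of gss_krb5_aes_encrypt(): CBC-decrypt the
 * bulk, CTS-decrypt the tail, recompute the HMAC over the plaintext and
 * compare it against the checksum carried in the packet. On success,
 * "headskip"/"tailskip" tell the caller how many confounder and checksum
 * bytes to strip.
 */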
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_blkcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_blkcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.desc.info = desc.iv;
		desc.desc.flags = 0;
		desc.desc.tfm = aux_cipher;

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = crypto_blkcipher_blocksize(cipher);
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}