drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
1 /*
2 * Modifications for Lustre
3 *
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5 *
6 * Copyright (c) 2011, 2012, Intel Corporation.
7 *
8 * Author: Eric Mei <ericm@clusterfs.com>
9 */
10
11 /*
12 * linux/net/sunrpc/gss_krb5_mech.c
13 * linux/net/sunrpc/gss_krb5_crypto.c
14 * linux/net/sunrpc/gss_krb5_seal.c
15 * linux/net/sunrpc/gss_krb5_seqnum.c
16 * linux/net/sunrpc/gss_krb5_unseal.c
17 *
18 * Copyright (c) 2001 The Regents of the University of Michigan.
19 * All rights reserved.
20 *
21 * Andy Adamson <andros@umich.edu>
22 * J. Bruce Fields <bfields@umich.edu>
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * 1. Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. Neither the name of the University nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
38 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
39 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
40 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
42 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
43 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
44 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
45 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
46 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
47 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51 #define DEBUG_SUBSYSTEM S_SEC
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/slab.h>
55 #include <linux/crypto.h>
56 #include <linux/mutex.h>
57
58 #include <obd.h>
59 #include <obd_class.h>
60 #include <obd_support.h>
61 #include <lustre/lustre_idl.h>
62 #include <lustre_net.h>
63 #include <lustre_import.h>
64 #include <lustre_sec.h>
65
66 #include "gss_err.h"
67 #include "gss_internal.h"
68 #include "gss_api.h"
69 #include "gss_asn1.h"
70 #include "gss_krb5.h"
71
72 static spinlock_t krb5_seq_lock;
73
74 struct krb5_enctype {
75 char *ke_dispname;
76 char *ke_enc_name; /* linux tfm name */
77 char *ke_hash_name; /* linux tfm name */
78 int ke_enc_mode; /* linux tfm mode */
79 int ke_hash_size; /* checksum size */
80 int ke_conf_size; /* confounder size */
81 unsigned int ke_hash_hmac:1; /* is hmac? */
82 };
83
84 /*
85 * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
86 * encryption, but we currently use plain CBC with padding because Linux
87 * doesn't support CTS yet. This needs to be fixed in the future.
88 */
89 static struct krb5_enctype enctypes[] = {
90 [ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */
91 "des-cbc-md5",
92 "cbc(des)",
93 "md5",
94 0,
95 16,
96 8,
97 0,
98 },
99 [ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
100 "des3-hmac-sha1",
101 "cbc(des3_ede)",
102 "hmac(sha1)",
103 0,
104 20,
105 8,
106 1,
107 },
108 [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
109 "aes128-cts-hmac-sha1-96",
110 "cbc(aes)",
111 "hmac(sha1)",
112 0,
113 12,
114 16,
115 1,
116 },
117 [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */
118 "aes256-cts-hmac-sha1-96",
119 "cbc(aes)",
120 "hmac(sha1)",
121 0,
122 12,
123 16,
124 1,
125 },
126 [ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */
127 "arcfour-hmac-md5",
128 "ecb(arc4)",
129 "hmac(md5)",
130 0,
131 16,
132 8,
133 1,
134 },
135 };
136
137 #define MAX_ENCTYPES ARRAY_SIZE(enctypes)
138
139 static const char *enctype2str(__u32 enctype)
140 {
141 if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
142 return enctypes[enctype].ke_dispname;
143
144 return "unknown";
145 }
146
147 static
148 int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
149 {
150 kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
151 if (IS_ERR(kb->kb_tfm)) {
152 CERROR("failed to alloc tfm: %s, mode %d\n",
153 alg_name, alg_mode);
154 return -1;
155 }
156
157 if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
158 CERROR("failed to set %s key, len %d\n",
159 alg_name, kb->kb_key.len);
160 return -1;
161 }
162
163 return 0;
164 }
165
166 static
167 int krb5_init_keys(struct krb5_ctx *kctx)
168 {
169 struct krb5_enctype *ke;
170
171 if (kctx->kc_enctype >= MAX_ENCTYPES ||
172 enctypes[kctx->kc_enctype].ke_hash_size == 0) {
173 CERROR("unsupported enctype %x\n", kctx->kc_enctype);
174 return -1;
175 }
176
177 ke = &enctypes[kctx->kc_enctype];
178
179 /* the arc4 tfm is stateful; callers must allocate, use and free it themselves */
180 if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
181 keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
182 return -1;
183
184 /* the hmac tfm is stateful; callers must allocate, use and free it themselves */
185 if (ke->ke_hash_hmac == 0 &&
186 keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
187 return -1;
188 if (ke->ke_hash_hmac == 0 &&
189 keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
190 return -1;
191
192 return 0;
193 }
194
195 static
196 void keyblock_free(struct krb5_keyblock *kb)
197 {
198 rawobj_free(&kb->kb_key);
199 if (kb->kb_tfm)
200 ll_crypto_free_blkcipher(kb->kb_tfm);
201 }
202
203 static
204 int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
205 {
206 return rawobj_dup(&new->kb_key, &kb->kb_key);
207 }
208
209 static
210 int get_bytes(char **ptr, const char *end, void *res, int len)
211 {
212 char *p, *q;
213 p = *ptr;
214 q = p + len;
215 if (q > end || q < p)
216 return -1;
217 memcpy(res, p, len);
218 *ptr = q;
219 return 0;
220 }
221
222 static
223 int get_rawobj(char **ptr, const char *end, rawobj_t *res)
224 {
225 char *p, *q;
226 __u32 len;
227
228 p = *ptr;
229 if (get_bytes(&p, end, &len, sizeof(len)))
230 return -1;
231
232 q = p + len;
233 if (q > end || q < p)
234 return -1;
235
236 OBD_ALLOC_LARGE(res->data, len);
237 if (!res->data)
238 return -1;
239
240 res->len = len;
241 memcpy(res->data, p, len);
242 *ptr = q;
243 return 0;
244 }
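/*
 * Illustration (not part of the original code): the context blob parsed by
 * the helpers above is a packed byte stream, read sequentially in host byte
 * order.  A rawobj_t on the wire, as consumed by get_rawobj(), is simply
 *
 *	| len (__u32) | len bytes of data |
 *
 * get_rawobj() copies the data into a freshly allocated buffer and advances
 * *ptr past it, failing if the length would run past 'end'.
 */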
245
246 static
247 int get_keyblock(char **ptr, const char *end,
248 struct krb5_keyblock *kb, __u32 keysize)
249 {
250 char *buf;
251
252 OBD_ALLOC_LARGE(buf, keysize);
253 if (buf == NULL)
254 return -1;
255
256 if (get_bytes(ptr, end, buf, keysize)) {
257 OBD_FREE_LARGE(buf, keysize);
258 return -1;
259 }
260
261 kb->kb_key.len = keysize;
262 kb->kb_key.data = buf;
263 return 0;
264 }
265
266 static
267 void delete_context_kerberos(struct krb5_ctx *kctx)
268 {
269 rawobj_free(&kctx->kc_mech_used);
270
271 keyblock_free(&kctx->kc_keye);
272 keyblock_free(&kctx->kc_keyi);
273 keyblock_free(&kctx->kc_keyc);
274 }
275
276 static
277 __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
278 {
279 unsigned int tmp_uint, keysize;
280
281 /* seed_init flag */
282 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
283 goto out_err;
284 kctx->kc_seed_init = (tmp_uint != 0);
285
286 /* seed */
287 if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
288 goto out_err;
289
290 /* sign/seal algorithm, not really used now */
291 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
292 get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
293 goto out_err;
294
295 /* end time */
296 if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
297 goto out_err;
298
299 /* seq send */
300 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
301 goto out_err;
302 kctx->kc_seq_send = tmp_uint;
303
304 /* mech oid */
305 if (get_rawobj(&p, end, &kctx->kc_mech_used))
306 goto out_err;
307
308 /* old style enc/seq keys in format:
309 * - enctype (u32)
310 * - keysize (u32)
311 * - keydata
312 * we decompose them to fit into the new context
313 */
314
315 /* enc key */
316 if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
317 goto out_err;
318
319 if (get_bytes(&p, end, &keysize, sizeof(keysize)))
320 goto out_err;
321
322 if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
323 goto out_err;
324
325 /* seq key */
326 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
327 tmp_uint != kctx->kc_enctype)
328 goto out_err;
329
330 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
331 tmp_uint != keysize)
332 goto out_err;
333
334 if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
335 goto out_err;
336
337 /* old style fallback */
338 if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
339 goto out_err;
340
341 if (p != end)
342 goto out_err;
343
344 CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
345 return 0;
346 out_err:
347 return GSS_S_FAILURE;
348 }
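/*
 * Sketch of the rfc1964 context blob, derived from the parser above (for
 * reference only):
 *
 *	| seed_init (u32) | seed | sign alg (u32) | seal alg (u32) |
 *	| endtime | seq_send (u32) | mech oid (rawobj) |
 *	| enctype (u32) | keysize (u32) | enc key data |
 *	| enctype (u32) | keysize (u32) | seq key data |
 *
 * The second enctype/keysize pair must match the first, and the blob must
 * be consumed exactly (p == end) or the import fails.
 */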
349
350 /* Flags for version 2 context flags */
351 #define KRB5_CTX_FLAG_INITIATOR 0x00000001
352 #define KRB5_CTX_FLAG_CFX 0x00000002
353 #define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
354
355 static
356 __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
357 {
358 unsigned int tmp_uint, keysize;
359
360 /* end time */
361 if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
362 goto out_err;
363
364 /* flags */
365 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
366 goto out_err;
367
368 if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
369 kctx->kc_initiate = 1;
370 if (tmp_uint & KRB5_CTX_FLAG_CFX)
371 kctx->kc_cfx = 1;
372 if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
373 kctx->kc_have_acceptor_subkey = 1;
374
375 /* seq send */
376 if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
377 goto out_err;
378
379 /* enctype */
380 if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
381 goto out_err;
382
383 /* size of each key */
384 if (get_bytes(&p, end, &keysize, sizeof(keysize)))
385 goto out_err;
386
387 /* number of keys - should always be 3 */
388 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
389 goto out_err;
390
391 if (tmp_uint != 3) {
392 CERROR("Invalid number of keys: %u\n", tmp_uint);
393 goto out_err;
394 }
395
396 /* ke */
397 if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
398 goto out_err;
399 /* ki */
400 if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
401 goto out_err;
402 /* kc */
403 if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
404 goto out_err;
405
406 CDEBUG(D_SEC, "successfully imported v2 context\n");
407 return 0;
408 out_err:
409 return GSS_S_FAILURE;
410 }
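/*
 * Sketch of the v2 (rfc4121) context blob, derived from the parser above
 * (for reference only):
 *
 *	| endtime | flags (u32) | seq_send | enctype (u32) |
 *	| keysize (u32) | key count (u32, must be 3) | ke | ki | kc |
 *
 * where each of the three keys is keysize raw bytes.
 */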
411
412 /*
413 * The whole purpose here is to keep the user-level gss context parsing from
414 * nfs-utils as unchanged as we can: it is not quite mature yet, and many
415 * details are still unclear, e.g. heimdal support.
416 */
417 static
418 __u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
419 struct gss_ctx *gctx)
420 {
421 struct krb5_ctx *kctx;
422 char *p = (char *) inbuf->data;
423 char *end = (char *) (inbuf->data + inbuf->len);
424 unsigned int tmp_uint, rc;
425
426 if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
427 CERROR("Failed to read version\n");
428 return GSS_S_FAILURE;
429 }
430
431 /* only versions 0, 1 and 2 are supported for the moment */
432 if (tmp_uint > 2) {
433 CERROR("Invalid version %u\n", tmp_uint);
434 return GSS_S_FAILURE;
435 }
436
437 OBD_ALLOC_PTR(kctx);
438 if (!kctx)
439 return GSS_S_FAILURE;
440
441 if (tmp_uint == 0 || tmp_uint == 1) {
442 kctx->kc_initiate = tmp_uint;
443 rc = import_context_rfc1964(kctx, p, end);
444 } else {
445 rc = import_context_rfc4121(kctx, p, end);
446 }
447
448 if (rc == 0)
449 rc = krb5_init_keys(kctx);
450
451 if (rc) {
452 delete_context_kerberos(kctx);
453 OBD_FREE_PTR(kctx);
454
455 return GSS_S_FAILURE;
456 }
457
458 gctx->internal_ctx_id = kctx;
459 return GSS_S_COMPLETE;
460 }
461
462 static
463 __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
464 struct gss_ctx *gctx_new)
465 {
466 struct krb5_ctx *kctx = gctx->internal_ctx_id;
467 struct krb5_ctx *knew;
468
469 OBD_ALLOC_PTR(knew);
470 if (!knew)
471 return GSS_S_FAILURE;
472
473 knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
474 knew->kc_cfx = kctx->kc_cfx;
475 knew->kc_seed_init = kctx->kc_seed_init;
476 knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
477 knew->kc_endtime = kctx->kc_endtime;
478
479 memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
480 knew->kc_seq_send = kctx->kc_seq_recv;
481 knew->kc_seq_recv = kctx->kc_seq_send;
482 knew->kc_enctype = kctx->kc_enctype;
483
484 if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
485 goto out_err;
486
487 if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
488 goto out_err;
489 if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
490 goto out_err;
491 if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
492 goto out_err;
493 if (krb5_init_keys(knew))
494 goto out_err;
495
496 gctx_new->internal_ctx_id = knew;
497 CDEBUG(D_SEC, "successfully copied reverse context\n");
498 return GSS_S_COMPLETE;
499
500 out_err:
501 delete_context_kerberos(knew);
502 OBD_FREE_PTR(knew);
503 return GSS_S_FAILURE;
504 }
505
506 static
507 __u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
508 unsigned long *endtime)
509 {
510 struct krb5_ctx *kctx = gctx->internal_ctx_id;
511
512 *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
513 return GSS_S_COMPLETE;
514 }
515
516 static
517 void gss_delete_sec_context_kerberos(void *internal_ctx)
518 {
519 struct krb5_ctx *kctx = internal_ctx;
520
521 delete_context_kerberos(kctx);
522 OBD_FREE_PTR(kctx);
523 }
524
525 static
526 void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
527 {
528 sg_set_buf(sg, ptr, len);
529 }
530
531 static
532 __u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
533 int decrypt,
534 void * iv,
535 void * in,
536 void * out,
537 int length)
538 {
539 struct blkcipher_desc desc;
540 struct scatterlist sg;
541 __u8 local_iv[16] = {0};
542 __u32 ret = -EINVAL;
543
544 LASSERT(tfm);
545 desc.tfm = tfm;
546 desc.info = local_iv;
547 desc.flags = 0;
548
549 if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
550 CERROR("output length %d mismatch blocksize %d\n",
551 length, ll_crypto_blkcipher_blocksize(tfm));
552 goto out;
553 }
554
555 if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
556 CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
557 goto out;
558 }
559
560 if (iv)
561 memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
562
563 memcpy(out, in, length);
564 buf_to_sg(&sg, out, length);
565
566 if (decrypt)
567 ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
568 else
569 ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
570
571 out:
572 return(ret);
573 }
574
575
576 static inline
577 int krb5_digest_hmac(struct ll_crypto_hash *tfm,
578 rawobj_t *key,
579 struct krb5_header *khdr,
580 int msgcnt, rawobj_t *msgs,
581 int iovcnt, lnet_kiov_t *iovs,
582 rawobj_t *cksum)
583 {
584 struct hash_desc desc;
585 struct scatterlist sg[1];
586 int i;
587
588 ll_crypto_hash_setkey(tfm, key->data, key->len);
589 desc.tfm = tfm;
590 desc.flags = 0;
591
592 ll_crypto_hash_init(&desc);
593
594 for (i = 0; i < msgcnt; i++) {
595 if (msgs[i].len == 0)
596 continue;
597 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
598 ll_crypto_hash_update(&desc, sg, msgs[i].len);
599 }
600
601 for (i = 0; i < iovcnt; i++) {
602 if (iovs[i].kiov_len == 0)
603 continue;
604
605 sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
606 iovs[i].kiov_offset);
607 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
608 }
609
610 if (khdr) {
611 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
612 ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
613 }
614
615 return ll_crypto_hash_final(&desc, cksum->data);
616 }
617
618
619 static inline
620 int krb5_digest_norm(struct ll_crypto_hash *tfm,
621 struct krb5_keyblock *kb,
622 struct krb5_header *khdr,
623 int msgcnt, rawobj_t *msgs,
624 int iovcnt, lnet_kiov_t *iovs,
625 rawobj_t *cksum)
626 {
627 struct hash_desc desc;
628 struct scatterlist sg[1];
629 int i;
630
631 LASSERT(kb->kb_tfm);
632 desc.tfm = tfm;
633 desc.flags = 0;
634
635 ll_crypto_hash_init(&desc);
636
637 for (i = 0; i < msgcnt; i++) {
638 if (msgs[i].len == 0)
639 continue;
640 buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
641 ll_crypto_hash_update(&desc, sg, msgs[i].len);
642 }
643
644 for (i = 0; i < iovcnt; i++) {
645 if (iovs[i].kiov_len == 0)
646 continue;
647
648 sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
649 iovs[i].kiov_offset);
650 ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
651 }
652
653 if (khdr) {
654 buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
655 ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
656 }
657
658 ll_crypto_hash_final(&desc, cksum->data);
659
660 return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
661 cksum->data, cksum->len);
662 }
663
664 /*
665 * compute a (keyed/keyless) checksum over the plain text with the krb5
666 * wire token header appended.
667 */
668 static
669 __s32 krb5_make_checksum(__u32 enctype,
670 struct krb5_keyblock *kb,
671 struct krb5_header *khdr,
672 int msgcnt, rawobj_t *msgs,
673 int iovcnt, lnet_kiov_t *iovs,
674 rawobj_t *cksum)
675 {
676 struct krb5_enctype *ke = &enctypes[enctype];
677 struct ll_crypto_hash *tfm;
678 __u32 code = GSS_S_FAILURE;
679 int rc;
680
681 if (!(tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0))) {
682 CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
683 return GSS_S_FAILURE;
684 }
685
686 cksum->len = ll_crypto_hash_digestsize(tfm);
687 OBD_ALLOC_LARGE(cksum->data, cksum->len);
688 if (!cksum->data) {
689 cksum->len = 0;
690 goto out_tfm;
691 }
692
693 if (ke->ke_hash_hmac)
694 rc = krb5_digest_hmac(tfm, &kb->kb_key,
695 khdr, msgcnt, msgs, iovcnt, iovs, cksum);
696 else
697 rc = krb5_digest_norm(tfm, kb,
698 khdr, msgcnt, msgs, iovcnt, iovs, cksum);
699
700 if (rc == 0)
701 code = GSS_S_COMPLETE;
702 out_tfm:
703 ll_crypto_free_hash(tfm);
704 return code;
705 }
706
707 static void fill_krb5_header(struct krb5_ctx *kctx,
708 struct krb5_header *khdr,
709 int privacy)
710 {
711 unsigned char acceptor_flag;
712
713 acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
714
715 if (privacy) {
716 khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
717 khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
718 khdr->kh_ec = cpu_to_be16(0);
719 khdr->kh_rrc = cpu_to_be16(0);
720 } else {
721 khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
722 khdr->kh_flags = acceptor_flag;
723 khdr->kh_ec = cpu_to_be16(0xffff);
724 khdr->kh_rrc = cpu_to_be16(0xffff);
725 }
726
727 khdr->kh_filler = 0xff;
728 spin_lock(&krb5_seq_lock);
729 khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
730 spin_unlock(&krb5_seq_lock);
731 }
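/*
 * For reference (not in the original source): the wire token header filled
 * in above carries, assuming the field widths implied by the cpu_to_be16()/
 * cpu_to_be64() conversions and the struct definition in gss_krb5.h,
 *
 *	| kh_tok_id (be16) | kh_flags | kh_filler (0xff) |
 *	| kh_ec (be16) | kh_rrc (be16) | kh_seq (be64) |
 *
 * A MIC token sets ec/rrc to 0xffff; a wrap token sets them to 0 and also
 * sets FLAG_WRAP_CONFIDENTIAL.  The sequence number is taken from
 * kc_seq_send under krb5_seq_lock.
 */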
732
733 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
734 struct krb5_header *khdr,
735 int privacy)
736 {
737 unsigned char acceptor_flag;
738 __u16 tok_id, ec_rrc;
739
740 acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
741
742 if (privacy) {
743 tok_id = KG_TOK_WRAP_MSG;
744 ec_rrc = 0x0;
745 } else {
746 tok_id = KG_TOK_MIC_MSG;
747 ec_rrc = 0xffff;
748 }
749
750 /* sanity checks */
751 if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
752 CERROR("bad token id\n");
753 return GSS_S_DEFECTIVE_TOKEN;
754 }
755 if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
756 CERROR("bad direction flag\n");
757 return GSS_S_BAD_SIG;
758 }
759 if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
760 CERROR("missing confidential flag\n");
761 return GSS_S_BAD_SIG;
762 }
763 if (khdr->kh_filler != 0xff) {
764 CERROR("bad filler\n");
765 return GSS_S_DEFECTIVE_TOKEN;
766 }
767 if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
768 be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
769 CERROR("bad EC or RRC\n");
770 return GSS_S_DEFECTIVE_TOKEN;
771 }
772 return GSS_S_COMPLETE;
773 }
774
775 static
776 __u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
777 int msgcnt,
778 rawobj_t *msgs,
779 int iovcnt,
780 lnet_kiov_t *iovs,
781 rawobj_t *token)
782 {
783 struct krb5_ctx *kctx = gctx->internal_ctx_id;
784 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
785 struct krb5_header *khdr;
786 rawobj_t cksum = RAWOBJ_EMPTY;
787
788 /* fill krb5 header */
789 LASSERT(token->len >= sizeof(*khdr));
790 khdr = (struct krb5_header *) token->data;
791 fill_krb5_header(kctx, khdr, 0);
792
793 /* checksum */
794 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
795 khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
796 return GSS_S_FAILURE;
797
798 LASSERT(cksum.len >= ke->ke_hash_size);
799 LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
800 memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
801 ke->ke_hash_size);
802
803 token->len = sizeof(*khdr) + ke->ke_hash_size;
804 rawobj_free(&cksum);
805 return GSS_S_COMPLETE;
806 }
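/*
 * Resulting MIC token (sketch, derived from the code above):
 *
 *	| krb5 header (sizeof(*khdr)) | last ke_hash_size bytes of checksum |
 *
 * gss_verify_mic_kerberos() below recomputes the checksum over the same
 * data and compares the trailing bytes.
 */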
807
808 static
809 __u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
810 int msgcnt,
811 rawobj_t *msgs,
812 int iovcnt,
813 lnet_kiov_t *iovs,
814 rawobj_t *token)
815 {
816 struct krb5_ctx *kctx = gctx->internal_ctx_id;
817 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
818 struct krb5_header *khdr;
819 rawobj_t cksum = RAWOBJ_EMPTY;
820 __u32 major;
821
822 if (token->len < sizeof(*khdr)) {
823 CERROR("short signature: %u\n", token->len);
824 return GSS_S_DEFECTIVE_TOKEN;
825 }
826
827 khdr = (struct krb5_header *) token->data;
828
829 major = verify_krb5_header(kctx, khdr, 0);
830 if (major != GSS_S_COMPLETE) {
831 CERROR("bad krb5 header\n");
832 return major;
833 }
834
835 if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
836 CERROR("short signature: %u, require %d\n",
837 token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
838 return GSS_S_FAILURE;
839 }
840
841 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
842 khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
843 CERROR("failed to make checksum\n");
844 return GSS_S_FAILURE;
845 }
846
847 LASSERT(cksum.len >= ke->ke_hash_size);
848 if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
849 ke->ke_hash_size)) {
850 CERROR("checksum mismatch\n");
851 rawobj_free(&cksum);
852 return GSS_S_BAD_SIG;
853 }
854
855 rawobj_free(&cksum);
856 return GSS_S_COMPLETE;
857 }
858
859 static
860 int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
861 {
862 int padding;
863
864 padding = (blocksize - (msg->len & (blocksize - 1))) &
865 (blocksize - 1);
866 if (!padding)
867 return 0;
868
869 if (msg->len + padding > msg_buflen) {
870 CERROR("bufsize %u too small: datalen %u, padding %u\n",
871 msg_buflen, msg->len, padding);
872 return -EINVAL;
873 }
874
875 memset(msg->data + msg->len, padding, padding);
876 msg->len += padding;
877 return 0;
878 }
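/*
 * Worked example (illustration only): with blocksize 16 and msg->len 20,
 * padding = (16 - (20 & 15)) & 15 = 12, so twelve bytes of value 0x0c are
 * appended and msg->len becomes 32.  An already block-aligned message gets
 * no padding at all.
 */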
879
880 static
881 int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
882 int mode_ecb,
883 int inobj_cnt,
884 rawobj_t *inobjs,
885 rawobj_t *outobj,
886 int enc)
887 {
888 struct blkcipher_desc desc;
889 struct scatterlist src, dst;
890 __u8 local_iv[16] = {0}, *buf;
891 __u32 datalen = 0;
892 int i, rc;
893 ENTRY;
894
895 buf = outobj->data;
896 desc.tfm = tfm;
897 desc.info = local_iv;
898 desc.flags = 0;
899
900 for (i = 0; i < inobj_cnt; i++) {
901 LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
902
903 buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
904 buf_to_sg(&dst, buf, outobj->len - datalen);
905
906 if (mode_ecb) {
907 if (enc)
908 rc = ll_crypto_blkcipher_encrypt(
909 &desc, &dst, &src, src.length);
910 else
911 rc = ll_crypto_blkcipher_decrypt(
912 &desc, &dst, &src, src.length);
913 } else {
914 if (enc)
915 rc = ll_crypto_blkcipher_encrypt_iv(
916 &desc, &dst, &src, src.length);
917 else
918 rc = ll_crypto_blkcipher_decrypt_iv(
919 &desc, &dst, &src, src.length);
920 }
921
922 if (rc) {
923 CERROR("encrypt error %d\n", rc);
924 RETURN(rc);
925 }
926
927 datalen += inobjs[i].len;
928 buf += inobjs[i].len;
929 }
930
931 outobj->len = datalen;
932 RETURN(0);
933 }
934
935 /*
936 * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
937 */
938 static
939 int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
940 struct krb5_header *khdr,
941 char *confounder,
942 struct ptlrpc_bulk_desc *desc,
943 rawobj_t *cipher,
944 int adj_nob)
945 {
946 struct blkcipher_desc ciph_desc;
947 __u8 local_iv[16] = {0};
948 struct scatterlist src, dst;
949 int blocksize, i, rc, nob = 0;
950
951 LASSERT(desc->bd_iov_count);
952 LASSERT(desc->bd_enc_iov);
953
954 blocksize = ll_crypto_blkcipher_blocksize(tfm);
955 LASSERT(blocksize > 1);
956 LASSERT(cipher->len == blocksize + sizeof(*khdr));
957
958 ciph_desc.tfm = tfm;
959 ciph_desc.info = local_iv;
960 ciph_desc.flags = 0;
961
962 /* encrypt confounder */
963 buf_to_sg(&src, confounder, blocksize);
964 buf_to_sg(&dst, cipher->data, blocksize);
965
966 rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
967 if (rc) {
968 CERROR("failed to encrypt confounder: %d\n", rc);
969 return rc;
970 }
971
972 /* encrypt clear pages */
973 for (i = 0; i < desc->bd_iov_count; i++) {
974 sg_set_page(&src, desc->bd_iov[i].kiov_page,
975 (desc->bd_iov[i].kiov_len + blocksize - 1) &
976 (~(blocksize - 1)),
977 desc->bd_iov[i].kiov_offset);
978 if (adj_nob)
979 nob += src.length;
980 sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
981 src.offset);
982
983 desc->bd_enc_iov[i].kiov_offset = dst.offset;
984 desc->bd_enc_iov[i].kiov_len = dst.length;
985
986 rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
987 src.length);
988 if (rc) {
989 CERROR("failed to encrypt page: %d\n", rc);
990 return rc;
991 }
992 }
993
994 /* encrypt krb5 header */
995 buf_to_sg(&src, khdr, sizeof(*khdr));
996 buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
997
998 rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
999 &dst, &src, sizeof(*khdr));
1000 if (rc) {
1001 CERROR("failed to encrypt krb5 header: %d\n", rc);
1002 return rc;
1003 }
1004
1005 if (adj_nob)
1006 desc->bd_nob = nob;
1007
1008 return 0;
1009 }
1010
1011 /*
1012 * desc->bd_nob_transferred is the size of the cipher text received.
1013 * desc->bd_nob is the expected size of the plain text.
1014 *
1015 * if adj_nob != 0, we adjust each page's kiov_len to the actual
1016 * plain text size.
1017 * - for client read: we don't know the data size of each page, so
1018 * bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
1019 * might be smaller, so we adjust it according to bd_enc_iov[]->kiov_len.
1020 * this means we DO NOT support the case where the server sends odd-sized
1021 * data in a page that is not the last one.
1022 * - for server write: we know exactly how much data to expect in each
1023 * page, so kiov_len is already accurate and should not be adjusted at all;
1024 * bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len), which
1025 * should have been done by prep_bulk().
1026 */
1027 static
1028 int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
1029 struct krb5_header *khdr,
1030 struct ptlrpc_bulk_desc *desc,
1031 rawobj_t *cipher,
1032 rawobj_t *plain,
1033 int adj_nob)
1034 {
1035 struct blkcipher_desc ciph_desc;
1036 __u8 local_iv[16] = {0};
1037 struct scatterlist src, dst;
1038 int ct_nob = 0, pt_nob = 0;
1039 int blocksize, i, rc;
1040
1041 LASSERT(desc->bd_iov_count);
1042 LASSERT(desc->bd_enc_iov);
1043 LASSERT(desc->bd_nob_transferred);
1044
1045 blocksize = ll_crypto_blkcipher_blocksize(tfm);
1046 LASSERT(blocksize > 1);
1047 LASSERT(cipher->len == blocksize + sizeof(*khdr));
1048
1049 ciph_desc.tfm = tfm;
1050 ciph_desc.info = local_iv;
1051 ciph_desc.flags = 0;
1052
1053 if (desc->bd_nob_transferred % blocksize) {
1054 CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
1055 return -EPROTO;
1056 }
1057
1058 /* decrypt head (confounder) */
1059 buf_to_sg(&src, cipher->data, blocksize);
1060 buf_to_sg(&dst, plain->data, blocksize);
1061
1062 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
1063 if (rc) {
1064 CERROR("failed to decrypt confounder: %d\n", rc);
1065 return rc;
1066 }
1067
1068 for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
1069 i++) {
1070 if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
1071 desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
1072 CERROR("page %d: odd offset %u len %u, blocksize %d\n",
1073 i, desc->bd_enc_iov[i].kiov_offset,
1074 desc->bd_enc_iov[i].kiov_len, blocksize);
1075 return -EFAULT;
1076 }
1077
1078 if (adj_nob) {
1079 if (ct_nob + desc->bd_enc_iov[i].kiov_len >
1080 desc->bd_nob_transferred)
1081 desc->bd_enc_iov[i].kiov_len =
1082 desc->bd_nob_transferred - ct_nob;
1083
1084 desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
1085 if (pt_nob + desc->bd_enc_iov[i].kiov_len >desc->bd_nob)
1086 desc->bd_iov[i].kiov_len = desc->bd_nob -pt_nob;
1087 } else {
1088 /* this should be guaranteed by LNET */
1089 LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
1090 desc->bd_nob_transferred);
1091 LASSERT(desc->bd_iov[i].kiov_len <=
1092 desc->bd_enc_iov[i].kiov_len);
1093 }
1094
1095 if (desc->bd_enc_iov[i].kiov_len == 0)
1096 continue;
1097
1098 sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
1099 desc->bd_enc_iov[i].kiov_len,
1100 desc->bd_enc_iov[i].kiov_offset);
1101 dst = src;
1102 if (desc->bd_iov[i].kiov_len % blocksize == 0)
1103 sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
1104
1105 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
1106 src.length);
1107 if (rc) {
1108 CERROR("failed to decrypt page: %d\n", rc);
1109 return rc;
1110 }
1111
1112 if (desc->bd_iov[i].kiov_len % blocksize != 0) {
1113 memcpy(page_address(desc->bd_iov[i].kiov_page) +
1114 desc->bd_iov[i].kiov_offset,
1115 page_address(desc->bd_enc_iov[i].kiov_page) +
1116 desc->bd_iov[i].kiov_offset,
1117 desc->bd_iov[i].kiov_len);
1118 }
1119
1120 ct_nob += desc->bd_enc_iov[i].kiov_len;
1121 pt_nob += desc->bd_iov[i].kiov_len;
1122 }
1123
1124 if (unlikely(ct_nob != desc->bd_nob_transferred)) {
1125 CERROR("%d cipher text transferred but only %d decrypted\n",
1126 desc->bd_nob_transferred, ct_nob);
1127 return -EFAULT;
1128 }
1129
1130 if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
1131 CERROR("%d plain text expected but only %d received\n",
1132 desc->bd_nob, pt_nob);
1133 return -EFAULT;
1134 }
1135
1136 /* if needed, clear the remaining unused iovs */
1137 if (adj_nob)
1138 while (i < desc->bd_iov_count)
1139 desc->bd_iov[i++].kiov_len = 0;
1140
1141 /* decrypt tail (krb5 header) */
1142 buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
1143 buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
1144
1145 rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
1146 &dst, &src, sizeof(*khdr));
1147 if (rc) {
1148 CERROR("failed to decrypt tail: %d\n", rc);
1149 return rc;
1150 }
1151
1152 if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
1153 CERROR("krb5 header doesn't match\n");
1154 return -EACCES;
1155 }
1156
1157 return 0;
1158 }
1159
1160 static
1161 __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
1162 rawobj_t *gsshdr,
1163 rawobj_t *msg,
1164 int msg_buflen,
1165 rawobj_t *token)
1166 {
1167 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1168 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1169 struct krb5_header *khdr;
1170 int blocksize;
1171 rawobj_t cksum = RAWOBJ_EMPTY;
1172 rawobj_t data_desc[3], cipher;
1173 __u8 conf[GSS_MAX_CIPHER_BLOCK];
1174 int rc = 0;
1175
1176 LASSERT(ke);
1177 LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
1178 LASSERT(kctx->kc_keye.kb_tfm == NULL ||
1179 ke->ke_conf_size >=
1180 ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
1181
1182 /*
1183 * final token format:
1184 * ---------------------------------------------------
1185 * | krb5 header | cipher text | checksum (16 bytes) |
1186 * ---------------------------------------------------
1187 */
1188
1189 /* fill krb5 header */
1190 LASSERT(token->len >= sizeof(*khdr));
1191 khdr = (struct krb5_header *) token->data;
1192 fill_krb5_header(kctx, khdr, 1);
1193
1194 /* generate confounder */
1195 cfs_get_random_bytes(conf, ke->ke_conf_size);
1196
1197 /* get encryption blocksize. note kc_keye might not be associated with
1198 * a tfm; currently that is only the case for arcfour-hmac */
1199 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1200 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1201 blocksize = 1;
1202 } else {
1203 LASSERT(kctx->kc_keye.kb_tfm);
1204 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1205 }
1206 LASSERT(blocksize <= ke->ke_conf_size);
1207
1208 /* padding the message */
1209 if (add_padding(msg, msg_buflen, blocksize))
1210 return GSS_S_FAILURE;
1211
1212 /*
1213 * clear text layout for checksum:
1214 * ------------------------------------------------------
1215 * | confounder | gss header | clear msgs | krb5 header |
1216 * ------------------------------------------------------
1217 */
1218 data_desc[0].data = conf;
1219 data_desc[0].len = ke->ke_conf_size;
1220 data_desc[1].data = gsshdr->data;
1221 data_desc[1].len = gsshdr->len;
1222 data_desc[2].data = msg->data;
1223 data_desc[2].len = msg->len;
1224
1225 /* compute checksum */
1226 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1227 khdr, 3, data_desc, 0, NULL, &cksum))
1228 return GSS_S_FAILURE;
1229 LASSERT(cksum.len >= ke->ke_hash_size);
1230
1231 /*
1232 * clear text layout for encryption:
1233 * -----------------------------------------
1234 * | confounder | clear msgs | krb5 header |
1235 * -----------------------------------------
1236 */
1237 data_desc[0].data = conf;
1238 data_desc[0].len = ke->ke_conf_size;
1239 data_desc[1].data = msg->data;
1240 data_desc[1].len = msg->len;
1241 data_desc[2].data = (__u8 *) khdr;
1242 data_desc[2].len = sizeof(*khdr);
1243
1244 /* cipher text is written in place, directly into the token */
1245 cipher.data = (__u8 *) (khdr + 1);
1246 cipher.len = token->len - sizeof(*khdr);
1247 LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
1248
1249 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1250 rawobj_t arc4_keye;
1251 struct ll_crypto_cipher *arc4_tfm;
1252
1253 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1254 NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1255 CERROR("failed to obtain arc4 enc key\n");
1256 GOTO(arc4_out, rc = -EACCES);
1257 }
1258
1259 arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1260 if (IS_ERR(arc4_tfm)) {
1261 CERROR("failed to alloc tfm arc4 in ECB mode\n");
1262 GOTO(arc4_out_key, rc = -EACCES);
1263 }
1264
1265 if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
1266 arc4_keye.len)) {
1267 CERROR("failed to set arc4 key, len %d\n",
1268 arc4_keye.len);
1269 GOTO(arc4_out_tfm, rc = -EACCES);
1270 }
1271
1272 rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1273 3, data_desc, &cipher, 1);
1274 arc4_out_tfm:
1275 ll_crypto_free_blkcipher(arc4_tfm);
1276 arc4_out_key:
1277 rawobj_free(&arc4_keye);
1278 arc4_out:
1279 do {} while(0); /* just to avoid compile warning */
1280 } else {
1281 rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1282 3, data_desc, &cipher, 1);
1283 }
1284
1285 if (rc != 0) {
1286 rawobj_free(&cksum);
1287 return GSS_S_FAILURE;
1288 }
1289
1290 /* fill in checksum */
1291 LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
1292 memcpy((char *)(khdr + 1) + cipher.len,
1293 cksum.data + cksum.len - ke->ke_hash_size,
1294 ke->ke_hash_size);
1295 rawobj_free(&cksum);
1296
1297 /* final token length */
1298 token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
1299 return GSS_S_COMPLETE;
1300 }
1301
1302 static
1303 __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
1304 struct ptlrpc_bulk_desc *desc)
1305 {
1306 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1307 int blocksize, i;
1308
1309 LASSERT(desc->bd_iov_count);
1310 LASSERT(desc->bd_enc_iov);
1311 LASSERT(kctx->kc_keye.kb_tfm);
1312
1313 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1314
1315 for (i = 0; i < desc->bd_iov_count; i++) {
1316 LASSERT(desc->bd_enc_iov[i].kiov_page);
1317 /*
1318 * the offset should always start at a page boundary, on both the
1319 * client and the server side.
1320 */
1321 if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
1322 CERROR("odd offset %d in page %d\n",
1323 desc->bd_iov[i].kiov_offset, i);
1324 return GSS_S_FAILURE;
1325 }
1326
1327 desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
1328 desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
1329 blocksize - 1) & (~(blocksize - 1));
1330 }
1331
1332 return GSS_S_COMPLETE;
1333 }
1334
1335 static
1336 __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
1337 struct ptlrpc_bulk_desc *desc,
1338 rawobj_t *token, int adj_nob)
1339 {
1340 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1341 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1342 struct krb5_header *khdr;
1343 int blocksize;
1344 rawobj_t cksum = RAWOBJ_EMPTY;
1345 rawobj_t data_desc[1], cipher;
1346 __u8 conf[GSS_MAX_CIPHER_BLOCK];
1347 int rc = 0;
1348
1349 LASSERT(ke);
1350 LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
1351
1352 /*
1353 * final token format:
1354 * --------------------------------------------------
1355 * | krb5 header | head/tail cipher text | checksum |
1356 * --------------------------------------------------
1357 */
1358
1359 /* fill krb5 header */
1360 LASSERT(token->len >= sizeof(*khdr));
1361 khdr = (struct krb5_header *) token->data;
1362 fill_krb5_header(kctx, khdr, 1);
1363
1364 /* generate confounder */
1365 cfs_get_random_bytes(conf, ke->ke_conf_size);
1366
1367 /* get encryption blocksize. note kc_keye might not be associated with
1368 * a tfm; currently that is only the case for arcfour-hmac */
1369 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1370 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1371 blocksize = 1;
1372 } else {
1373 LASSERT(kctx->kc_keye.kb_tfm);
1374 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1375 }
1376
1377 /*
1378 * we assume the size of krb5_header (16 bytes) must be n * blocksize.
1379 * the bulk token size would be exactly (sizeof(krb5_header) +
1380 * blocksize + sizeof(krb5_header) + hashsize)
1381 */
1382 LASSERT(blocksize <= ke->ke_conf_size);
1383 LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
1384 LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
1385
1386 /*
1387 * clear text layout for checksum:
1388 * ------------------------------------------
1389 * | confounder | clear pages | krb5 header |
1390 * ------------------------------------------
1391 */
1392 data_desc[0].data = conf;
1393 data_desc[0].len = ke->ke_conf_size;
1394
1395 /* compute checksum */
1396 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1397 khdr, 1, data_desc,
1398 desc->bd_iov_count, desc->bd_iov,
1399 &cksum))
1400 return GSS_S_FAILURE;
1401 LASSERT(cksum.len >= ke->ke_hash_size);
1402
1403 /*
1404 * clear text layout for encryption:
1405 * ------------------------------------------
1406 * | confounder | clear pages | krb5 header |
1407 * ------------------------------------------
1408 * | | |
1409 * ---------- (cipher pages) |
1410 * result token: | |
1411 * -------------------------------------------
1412 * | krb5 header | cipher text | cipher text |
1413 * -------------------------------------------
1414 */
1415 data_desc[0].data = conf;
1416 data_desc[0].len = ke->ke_conf_size;
1417
1418 cipher.data = (__u8 *) (khdr + 1);
1419 cipher.len = blocksize + sizeof(*khdr);
1420
1421 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1422 LBUG();
1423 rc = 0;
1424 } else {
1425 rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
1426 conf, desc, &cipher, adj_nob);
1427 }
1428
1429 if (rc != 0) {
1430 rawobj_free(&cksum);
1431 return GSS_S_FAILURE;
1432 }
1433
1434 /* fill in checksum */
1435 LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
1436 memcpy((char *)(khdr + 1) + cipher.len,
1437 cksum.data + cksum.len - ke->ke_hash_size,
1438 ke->ke_hash_size);
1439 rawobj_free(&cksum);
1440
1441 /* final token length */
1442 token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
1443 return GSS_S_COMPLETE;
1444 }
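/*
 * Size sketch (illustration only, assuming sizeof(struct krb5_header) is 16
 * as the comment above notes): for an AES enctype, blocksize is 16 and
 * ke_hash_size is 12, so the bulk token is
 *
 *	16 (header) + 16 (confounder cipher) + 16 (header cipher) + 12 (cksum)
 *	= 60 bytes,
 *
 * while the encrypted pages themselves travel in desc->bd_enc_iov rather
 * than in the token.
 */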
1445
1446 static
1447 __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
1448 rawobj_t *gsshdr,
1449 rawobj_t *token,
1450 rawobj_t *msg)
1451 {
1452 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1453 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1454 struct krb5_header *khdr;
1455 unsigned char *tmpbuf;
1456 int blocksize, bodysize;
1457 rawobj_t cksum = RAWOBJ_EMPTY;
1458 rawobj_t cipher_in, plain_out;
1459 rawobj_t hash_objs[3];
1460 int rc = 0;
1461 __u32 major;
1462
1463 LASSERT(ke);
1464
1465 if (token->len < sizeof(*khdr)) {
1466 CERROR("short signature: %u\n", token->len);
1467 return GSS_S_DEFECTIVE_TOKEN;
1468 }
1469
1470 khdr = (struct krb5_header *) token->data;
1471
1472 major = verify_krb5_header(kctx, khdr, 1);
1473 if (major != GSS_S_COMPLETE) {
1474 CERROR("bad krb5 header\n");
1475 return major;
1476 }
1477
1478 /* block size */
1479 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1480 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1481 blocksize = 1;
1482 } else {
1483 LASSERT(kctx->kc_keye.kb_tfm);
1484 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1485 }
1486
1487 /* expected token layout:
1488 * ----------------------------------------
1489 * | krb5 header | cipher text | checksum |
1490 * ----------------------------------------
1491 */
1492 bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
1493
1494 if (bodysize % blocksize) {
1495 CERROR("odd bodysize %d\n", bodysize);
1496 return GSS_S_DEFECTIVE_TOKEN;
1497 }
1498
1499 if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
1500 CERROR("incomplete token: bodysize %d\n", bodysize);
1501 return GSS_S_DEFECTIVE_TOKEN;
1502 }
1503
1504 if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
1505 CERROR("buffer too small: %u, require %d\n",
1506 msg->len, bodysize - ke->ke_conf_size);
1507 return GSS_S_FAILURE;
1508 }
1509
1510 /* decrypting */
1511 OBD_ALLOC_LARGE(tmpbuf, bodysize);
1512 if (!tmpbuf)
1513 return GSS_S_FAILURE;
1514
1515 major = GSS_S_FAILURE;
1516
1517 cipher_in.data = (__u8 *) (khdr + 1);
1518 cipher_in.len = bodysize;
1519 plain_out.data = tmpbuf;
1520 plain_out.len = bodysize;
1521
1522 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1523 rawobj_t arc4_keye;
1524 struct ll_crypto_cipher *arc4_tfm;
1525
1526 cksum.data = token->data + token->len - ke->ke_hash_size;
1527 cksum.len = ke->ke_hash_size;
1528
1529 if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
1530 NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
1531 CERROR("failed to obtain arc4 enc key\n");
1532 GOTO(arc4_out, rc = -EACCES);
1533 }
1534
1535 arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
1536 if (IS_ERR(arc4_tfm)) {
1537 CERROR("failed to alloc tfm arc4 in ECB mode\n");
1538 GOTO(arc4_out_key, rc = -EACCES);
1539 }
1540
1541 if (ll_crypto_blkcipher_setkey(arc4_tfm,
1542 arc4_keye.data, arc4_keye.len)) {
1543 CERROR("failed to set arc4 key, len %d\n",
1544 arc4_keye.len);
1545 GOTO(arc4_out_tfm, rc = -EACCES);
1546 }
1547
1548 rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1549 1, &cipher_in, &plain_out, 0);
1550 arc4_out_tfm:
1551 ll_crypto_free_blkcipher(arc4_tfm);
1552 arc4_out_key:
1553 rawobj_free(&arc4_keye);
1554 arc4_out:
1555 cksum = RAWOBJ_EMPTY;
1556 } else {
1557 rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
1558 1, &cipher_in, &plain_out, 0);
1559 }
1560
1561 if (rc != 0) {
1562 CERROR("decryption failed\n");
1563 goto out_free;
1564 }
1565 LASSERT(plain_out.len == bodysize);
1566
1567 /* expected clear text layout:
1568 * -----------------------------------------
1569 * | confounder | clear msgs | krb5 header |
1570 * -----------------------------------------
1571 */
1572
1573 /* verify krb5 header in token is not modified */
1574 if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
1575 sizeof(*khdr))) {
1576 CERROR("decrypted krb5 header mismatch\n");
1577 goto out_free;
1578 }
1579
1580 /* verify checksum, compose clear text as layout:
1581 * ------------------------------------------------------
1582 * | confounder | gss header | clear msgs | krb5 header |
1583 * ------------------------------------------------------
1584 */
1585 hash_objs[0].len = ke->ke_conf_size;
1586 hash_objs[0].data = plain_out.data;
1587 hash_objs[1].len = gsshdr->len;
1588 hash_objs[1].data = gsshdr->data;
1589 hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
1590 hash_objs[2].data = plain_out.data + ke->ke_conf_size;
1591 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1592 khdr, 3, hash_objs, 0, NULL, &cksum))
1593 goto out_free;
1594
1595 LASSERT(cksum.len >= ke->ke_hash_size);
1596 if (memcmp((char *)(khdr + 1) + bodysize,
1597 cksum.data + cksum.len - ke->ke_hash_size,
1598 ke->ke_hash_size)) {
1599 CERROR("checksum mismatch\n");
1600 goto out_free;
1601 }
1602
1603 msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
1604 memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
1605
1606 major = GSS_S_COMPLETE;
1607 out_free:
1608 OBD_FREE_LARGE(tmpbuf, bodysize);
1609 rawobj_free(&cksum);
1610 return major;
1611 }
1612
1613 static
1614 __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
1615 struct ptlrpc_bulk_desc *desc,
1616 rawobj_t *token, int adj_nob)
1617 {
1618 struct krb5_ctx *kctx = gctx->internal_ctx_id;
1619 struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
1620 struct krb5_header *khdr;
1621 int blocksize;
1622 rawobj_t cksum = RAWOBJ_EMPTY;
1623 rawobj_t cipher, plain;
1624 rawobj_t data_desc[1];
1625 int rc;
1626 __u32 major;
1627
1628 LASSERT(ke);
1629
1630 if (token->len < sizeof(*khdr)) {
1631 CERROR("short signature: %u\n", token->len);
1632 return GSS_S_DEFECTIVE_TOKEN;
1633 }
1634
1635 khdr = (struct krb5_header *) token->data;
1636
1637 major = verify_krb5_header(kctx, khdr, 1);
1638 if (major != GSS_S_COMPLETE) {
1639 CERROR("bad krb5 header\n");
1640 return major;
1641 }
1642
1643 /* block size */
1644 if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
1645 LASSERT(kctx->kc_keye.kb_tfm == NULL);
1646 blocksize = 1;
1647 LBUG();
1648 } else {
1649 LASSERT(kctx->kc_keye.kb_tfm);
1650 blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
1651 }
1652 LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
1653
1654 /*
1655 * token format is expected as:
1656 * -----------------------------------------------
1657 * | krb5 header | head/tail cipher text | cksum |
1658 * -----------------------------------------------
1659 */
1660 if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
1661 ke->ke_hash_size) {
1662 CERROR("short token size: %u\n", token->len);
1663 return GSS_S_DEFECTIVE_TOKEN;
1664 }
1665
1666 cipher.data = (__u8 *) (khdr + 1);
1667 cipher.len = blocksize + sizeof(*khdr);
1668 plain.data = cipher.data;
1669 plain.len = cipher.len;
1670
1671 rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
1672 desc, &cipher, &plain, adj_nob);
1673 if (rc)
1674 return GSS_S_DEFECTIVE_TOKEN;
1675
1676 /*
1677 * verify checksum, compose clear text as layout:
1678 * ------------------------------------------
1679 * | confounder | clear pages | krb5 header |
1680 * ------------------------------------------
1681 */
1682 data_desc[0].data = plain.data;
1683 data_desc[0].len = blocksize;
1684
1685 if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
1686 khdr, 1, data_desc,
1687 desc->bd_iov_count, desc->bd_iov,
1688 &cksum))
1689 return GSS_S_FAILURE;
1690 LASSERT(cksum.len >= ke->ke_hash_size);
1691
1692 if (memcmp(plain.data + blocksize + sizeof(*khdr),
1693 cksum.data + cksum.len - ke->ke_hash_size,
1694 ke->ke_hash_size)) {
1695 CERROR("checksum mismatch\n");
1696 rawobj_free(&cksum);
1697 return GSS_S_BAD_SIG;
1698 }
1699
1700 rawobj_free(&cksum);
1701 return GSS_S_COMPLETE;
1702 }
1703
1704 int gss_display_kerberos(struct gss_ctx *ctx,
1705 char *buf,
1706 int bufsize)
1707 {
1708 struct krb5_ctx *kctx = ctx->internal_ctx_id;
1709 int written;
1710
1711 written = snprintf(buf, bufsize, "krb5 (%s)",
1712 enctype2str(kctx->kc_enctype));
1713 return written;
1714 }
1715
1716 static struct gss_api_ops gss_kerberos_ops = {
1717 .gss_import_sec_context = gss_import_sec_context_kerberos,
1718 .gss_copy_reverse_context = gss_copy_reverse_context_kerberos,
1719 .gss_inquire_context = gss_inquire_context_kerberos,
1720 .gss_get_mic = gss_get_mic_kerberos,
1721 .gss_verify_mic = gss_verify_mic_kerberos,
1722 .gss_wrap = gss_wrap_kerberos,
1723 .gss_unwrap = gss_unwrap_kerberos,
1724 .gss_prep_bulk = gss_prep_bulk_kerberos,
1725 .gss_wrap_bulk = gss_wrap_bulk_kerberos,
1726 .gss_unwrap_bulk = gss_unwrap_bulk_kerberos,
1727 .gss_delete_sec_context = gss_delete_sec_context_kerberos,
1728 .gss_display = gss_display_kerberos,
1729 };
1730
1731 static struct subflavor_desc gss_kerberos_sfs[] = {
1732 {
1733 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5N,
1734 .sf_qop = 0,
1735 .sf_service = SPTLRPC_SVC_NULL,
1736 .sf_name = "krb5n"
1737 },
1738 {
1739 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5A,
1740 .sf_qop = 0,
1741 .sf_service = SPTLRPC_SVC_AUTH,
1742 .sf_name = "krb5a"
1743 },
1744 {
1745 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
1746 .sf_qop = 0,
1747 .sf_service = SPTLRPC_SVC_INTG,
1748 .sf_name = "krb5i"
1749 },
1750 {
1751 .sf_subflavor = SPTLRPC_SUBFLVR_KRB5P,
1752 .sf_qop = 0,
1753 .sf_service = SPTLRPC_SVC_PRIV,
1754 .sf_name = "krb5p"
1755 },
1756 };
1757
1758 /*
1759 * currently we leave module owner NULL
1760 */
1761 static struct gss_api_mech gss_kerberos_mech = {
1762 .gm_owner = NULL, /*THIS_MODULE, */
1763 .gm_name = "krb5",
1764 .gm_oid = (rawobj_t)
1765 {9, "\052\206\110\206\367\022\001\002\002"},
1766 .gm_ops = &gss_kerberos_ops,
1767 .gm_sf_num = 4,
1768 .gm_sfs = gss_kerberos_sfs,
1769 };
1770
1771 int __init init_kerberos_module(void)
1772 {
1773 int status;
1774
1775 spin_lock_init(&krb5_seq_lock);
1776
1777 status = lgss_mech_register(&gss_kerberos_mech);
1778 if (status)
1779 CERROR("Failed to register kerberos gss mechanism!\n");
1780 return status;
1781 }
1782
1783 void __exit cleanup_kerberos_module(void)
1784 {
1785 lgss_mech_unregister(&gss_kerberos_mech);
1786 }