/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/byteorder.h>

#ifdef __amd64

#ifdef _KERNEL
/* Workaround for no XMM kernel thread save/restore */
#define KPREEMPT_DISABLE kpreempt_disable()
#define KPREEMPT_ENABLE kpreempt_enable()

#else
#define KPREEMPT_DISABLE
#define KPREEMPT_ENABLE
#endif /* _KERNEL */

extern void gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res);
static int intel_pclmulqdq_instruction_present(void);
#endif /* __amd64 */

struct aes_block {
	uint64_t a;
	uint64_t b;
};


/*
 * gcm_mul()
 * Perform a carry-less multiplication (that is, use XOR instead of the
 * multiply operator) on *x_in and *y and place the result in *res.
 *
 * Byte swap the input (*x_in and *y) and the output (*res).
 *
 * Note: x_in, y, and res all point to 16-byte numbers (an array of two
 * 64-bit integers).
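 *
 * The portable fallback below is the bit-serial multiply over GF(2^128)
 * described in NIST SP 800-38D (GHASH); R is the field reduction
 * constant 0xe1 || 0^120.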
 */
void
gcm_mul(uint64_t *x_in, uint64_t *y, uint64_t *res)
{
#ifdef __amd64
	if (intel_pclmulqdq_instruction_present()) {
		KPREEMPT_DISABLE;
		gcm_mul_pclmulqdq(x_in, y, res);
		KPREEMPT_ENABLE;
	} else
#endif /* __amd64 */
	{
		static const uint64_t R = 0xe100000000000000ULL;
		struct aes_block z = {0, 0};
		struct aes_block v;
		uint64_t x;
		int i, j;

		v.a = ntohll(y[0]);
		v.b = ntohll(y[1]);

		for (j = 0; j < 2; j++) {
			x = ntohll(x_in[j]);
			for (i = 0; i < 64; i++, x <<= 1) {
				if (x & 0x8000000000000000ULL) {
					z.a ^= v.a;
					z.b ^= v.b;
				}
				if (v.b & 1ULL) {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = (v.a >> 1) ^ R;
				} else {
					v.b = (v.a << 63)|(v.b >> 1);
					v.a = v.a >> 1;
				}
			}
		}
		res[0] = htonll(z.a);
		res[1] = htonll(z.b);
	}
}


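/*
 * GHASH(c, d, t): XOR the 16-byte block d into the running hash
 * c->gcm_ghash, then multiply by the subkey c->gcm_H in GF(2^128),
 * leaving the result in t (normally c->gcm_ghash itself).
 */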
#define GHASH(c, d, t) \
	xor_block((uint8_t *)(d), (uint8_t *)(c)->gcm_ghash); \
	gcm_mul((uint64_t *)(void *)(c)->gcm_ghash, (c)->gcm_H, \
	    (uint64_t *)(void *)(t));


/*
 * Encrypt multiple blocks of data in GCM mode. Decrypt for GCM mode
 * is done in another function.
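 * Input that does not fill a whole block is buffered in
 * ctx->gcm_remainder and consumed on the next call or by
 * gcm_encrypt_final().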
 */
int
gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need = 0;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);

	if (length + ctx->gcm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->gcm_remainder + ctx->gcm_remainder_len,
		    length);
		ctx->gcm_remainder_len += length;
		ctx->gcm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->gcm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->gcm_remainder_len > 0) {
			need = block_size - ctx->gcm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->gcm_remainder)
			    [ctx->gcm_remainder_len], need);

			blockp = (uint8_t *)ctx->gcm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 32 bits of the counter block.
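		 * The counter is the last four bytes of the 16-byte
		 * counter block (the low-order half of gcm_cb[1] in
		 * big-endian byte order); the ntohll()/htonll() round
		 * trip keeps the increment endian-neutral.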
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);
		xor_block(blockp, (uint8_t *)ctx->gcm_tmp);

		lastp = (uint8_t *)ctx->gcm_tmp;

		ctx->gcm_processed_data_len += block_size;

		if (out == NULL) {
			if (ctx->gcm_remainder_len > 0) {
				bcopy(blockp, ctx->gcm_copy_to,
				    ctx->gcm_remainder_len);
				bcopy(blockp + ctx->gcm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* add ciphertext to the hash */
		GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

		/* Update pointer to next block of data to be processed. */
		if (ctx->gcm_remainder_len != 0) {
			datap += need;
			ctx->gcm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			ctx->gcm_copy_to = datap;
			goto out;
		}
		ctx->gcm_copy_to = NULL;

	} while (remainder > 0);
out:
	return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
int
gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	uint8_t *ghash, *macp = NULL;
	int i, rv;

	if (out->cd_length <
	    (ctx->gcm_remainder_len + ctx->gcm_tag_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	ghash = (uint8_t *)ctx->gcm_ghash;

	if (ctx->gcm_remainder_len > 0) {
		uint64_t counter;
		uint8_t *tmpp = (uint8_t *)ctx->gcm_tmp;

		/*
		 * Here is where we deal with data that is not a
		 * multiple of the block size.
		 */

		/*
		 * Increment counter.
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb,
		    (uint8_t *)ctx->gcm_tmp);

		macp = (uint8_t *)ctx->gcm_remainder;
		bzero(macp + ctx->gcm_remainder_len,
		    block_size - ctx->gcm_remainder_len);

		/* XOR with counter block */
		for (i = 0; i < ctx->gcm_remainder_len; i++) {
			macp[i] ^= tmpp[i];
		}

		/* add ciphertext to the hash */
		GHASH(ctx, macp, ghash);

		ctx->gcm_processed_data_len += ctx->gcm_remainder_len;
	}

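	/*
	 * Compute the tag: fold len(A) || len(C) into the hash, encrypt
	 * the pre-counter block J0, and XOR it in; the finished tag is
	 * left in gcm_ghash.
	 */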
	ctx->gcm_len_a_len_c[1] =
	    htonll(CRYPTO_BYTES2BITS(ctx->gcm_processed_data_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	if (ctx->gcm_remainder_len > 0) {
		rv = crypto_put_output_data(macp, out, ctx->gcm_remainder_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}
	out->cd_offset += ctx->gcm_remainder_len;
	ctx->gcm_remainder_len = 0;
	rv = crypto_put_output_data(ghash, out, ctx->gcm_tag_len);
	if (rv != CRYPTO_SUCCESS)
		return (rv);
	out->cd_offset += ctx->gcm_tag_len;

	return (CRYPTO_SUCCESS);
}

/*
 * This will only deal with decrypting the last block of the input that
 * might not be a multiple of block length.
 */
static void
gcm_decrypt_incomplete_block(gcm_ctx_t *ctx, size_t block_size, size_t index,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *datap, *outp, *counterp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int i;

	/*
	 * Increment counter.
	 * Counter bits are confined to the bottom 32 bits
	 */
	counter = ntohll(ctx->gcm_cb[1] & counter_mask);
	counter = htonll(counter + 1);
	counter &= counter_mask;
	ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

	datap = (uint8_t *)ctx->gcm_remainder;
	outp = &((ctx->gcm_pt_buf)[index]);
	counterp = (uint8_t *)ctx->gcm_tmp;

	/* zero-pad the final partial block of ciphertext before hashing it */
	bzero((uint8_t *)ctx->gcm_tmp, block_size);
	bcopy(datap, (uint8_t *)ctx->gcm_tmp, ctx->gcm_remainder_len);

	/* add ciphertext to the hash */
	GHASH(ctx, ctx->gcm_tmp, ctx->gcm_ghash);

	/* decrypt remaining ciphertext */
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, counterp);

	/* XOR with counter block */
	for (i = 0; i < ctx->gcm_remainder_len; i++) {
		outp[i] = datap[i] ^ counterp[i];
	}
}

/* ARGSUSED */
int
gcm_mode_decrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t new_len;
	uint8_t *new;

	/*
	 * Copy contiguous ciphertext input blocks to plaintext buffer.
	 * Ciphertext will be decrypted in the final.
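	 * The last gcm_tag_len bytes of the accumulated input are the
	 * tag, so the ciphertext/tag boundary is only known once all
	 * input has been seen; decryption is therefore deferred to
	 * gcm_decrypt_final().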
	 */
	if (length > 0) {
		new_len = ctx->gcm_pt_buf_len + length;
		new = vmem_alloc(new_len, ctx->gcm_kmflag);
		if (new == NULL)
			return (CRYPTO_HOST_MEMORY);
		bcopy(ctx->gcm_pt_buf, new, ctx->gcm_pt_buf_len);
		vmem_free(ctx->gcm_pt_buf, ctx->gcm_pt_buf_len);

		ctx->gcm_pt_buf = new;
		ctx->gcm_pt_buf_len = new_len;
		bcopy(data, &ctx->gcm_pt_buf[ctx->gcm_processed_data_len],
		    length);
		ctx->gcm_processed_data_len += length;
	}

	ctx->gcm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}

int
gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t pt_len;
	size_t remainder;
	uint8_t *ghash;
	uint8_t *blockp;
	uint8_t *cbp;
	uint64_t counter;
	uint64_t counter_mask = ntohll(0x00000000ffffffffULL);
	int processed = 0, rv;

	ASSERT(ctx->gcm_processed_data_len == ctx->gcm_pt_buf_len);

	pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
	ghash = (uint8_t *)ctx->gcm_ghash;
	blockp = ctx->gcm_pt_buf;
	remainder = pt_len;
	while (remainder > 0) {
		/* Incomplete last block */
		if (remainder < block_size) {
			bcopy(blockp, ctx->gcm_remainder, remainder);
			ctx->gcm_remainder_len = remainder;
			/*
			 * not expecting any more ciphertext, just
			 * compute plaintext for the remaining input
			 */
			gcm_decrypt_incomplete_block(ctx, block_size,
			    processed, encrypt_block, xor_block);
			ctx->gcm_remainder_len = 0;
			goto out;
		}
		/* add ciphertext to the hash */
		GHASH(ctx, blockp, ghash);

		/*
		 * Increment counter.
		 * Counter bits are confined to the bottom 32 bits
		 */
		counter = ntohll(ctx->gcm_cb[1] & counter_mask);
		counter = htonll(counter + 1);
		counter &= counter_mask;
		ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;

		cbp = (uint8_t *)ctx->gcm_tmp;
		encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_cb, cbp);

		/* XOR with ciphertext */
		xor_block(cbp, blockp);

		processed += block_size;
		blockp += block_size;
		remainder -= block_size;
	}
out:
	ctx->gcm_len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(pt_len));
	GHASH(ctx, ctx->gcm_len_a_len_c, ghash);
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_J0,
	    (uint8_t *)ctx->gcm_J0);
	xor_block((uint8_t *)ctx->gcm_J0, ghash);

	/* compare the input authentication tag with what we calculated */
	if (bcmp(&ctx->gcm_pt_buf[pt_len], ghash, ctx->gcm_tag_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		rv = crypto_put_output_data(ctx->gcm_pt_buf, out, pt_len);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}

static int
gcm_validate_args(CK_AES_GCM_PARAMS *gcm_param)
{
	size_t tag_len;

	/*
	 * Check the length of the authentication tag (in bits).
	 */
	tag_len = gcm_param->ulTagBits;
	switch (tag_len) {
	case 32:
	case 64:
	case 96:
	case 104:
	case 112:
	case 120:
	case 128:
		break;
	default:
		return (CRYPTO_MECHANISM_PARAM_INVALID);
	}

	if (gcm_param->ulIvLen == 0)
		return (CRYPTO_MECHANISM_PARAM_INVALID);

	return (CRYPTO_SUCCESS);
}

static void
gcm_format_initial_blocks(uchar_t *iv, ulong_t iv_len,
    gcm_ctx_t *ctx, size_t block_size,
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *cb;
	ulong_t remainder = iv_len;
	ulong_t processed = 0;
	uint8_t *datap, *ghash;
	uint64_t len_a_len_c[2];

	ghash = (uint8_t *)ctx->gcm_ghash;
	cb = (uint8_t *)ctx->gcm_cb;
	if (iv_len == 12) {
		bcopy(iv, cb, 12);
		cb[12] = 0;
		cb[13] = 0;
		cb[14] = 0;
		cb[15] = 1;
		/* J0 will be used again in the final */
		copy_block(cb, (uint8_t *)ctx->gcm_J0);
	} else {
		/* GHASH the IV */
		do {
			if (remainder < block_size) {
				bzero(cb, block_size);
				bcopy(&(iv[processed]), cb, remainder);
				datap = (uint8_t *)cb;
				remainder = 0;
			} else {
				datap = (uint8_t *)(&(iv[processed]));
				processed += block_size;
				remainder -= block_size;
			}
			GHASH(ctx, datap, ghash);
		} while (remainder > 0);

		len_a_len_c[0] = 0;
		len_a_len_c[1] = htonll(CRYPTO_BYTES2BITS(iv_len));
		GHASH(ctx, len_a_len_c, ctx->gcm_J0);

		/* J0 will be used again in the final */
		copy_block((uint8_t *)ctx->gcm_J0, (uint8_t *)cb);
	}
}
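
/*
 * Both cases above follow NIST SP 800-38D: with a 96-bit IV the
 * pre-counter block is J0 = IV || 0^31 || 1; otherwise J0 is the GHASH
 * of the zero-padded IV followed by a block holding the IV length in
 * bits.
 */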

/*
 * The following function is called at encrypt or decrypt init time
 * for AES GCM mode.
 */
int
gcm_init(gcm_ctx_t *ctx, unsigned char *iv, size_t iv_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *ghash, *datap, *authp;
	size_t remainder, processed;

	/* encrypt zero block to get subkey H */
	bzero(ctx->gcm_H, sizeof (ctx->gcm_H));
	encrypt_block(ctx->gcm_keysched, (uint8_t *)ctx->gcm_H,
	    (uint8_t *)ctx->gcm_H);

	gcm_format_initial_blocks(iv, iv_len, ctx, block_size,
	    copy_block, xor_block);

	authp = (uint8_t *)ctx->gcm_tmp;
	ghash = (uint8_t *)ctx->gcm_ghash;
	bzero(authp, block_size);
	bzero(ghash, block_size);

	processed = 0;
	remainder = auth_data_len;
	do {
		if (remainder < block_size) {
			/*
			 * There isn't a full block of data; pad the rest
			 * of the buffer with zeros.
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		/* add auth data to the hash */
		GHASH(ctx, datap, ghash);

	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}

int
gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GCM_PARAMS *gcm_param;

	if (param != NULL) {
		gcm_param = (CK_AES_GCM_PARAMS *)(void *)param;

		if ((rv = gcm_validate_args(gcm_param)) != 0) {
			return (rv);
		}

		gcm_ctx->gcm_tag_len = gcm_param->ulTagBits;
		gcm_ctx->gcm_tag_len >>= 3;
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gcm_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gcm_param->pIv, gcm_param->ulIvLen,
	    gcm_param->pAAD, gcm_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}
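
/*
 * Illustrative caller sketch (assumptions, not part of this file): the
 * block-size constant AES_BLOCK_LEN and the callbacks aes_encrypt_block(),
 * aes_copy_block() and aes_xor_block() are assumed to come from the AES
 * provider that normally drives this code.
 *
 *	CK_AES_GCM_PARAMS p = {
 *		.pIv = iv, .ulIvLen = 12,
 *		.pAAD = aad, .ulAADLen = aad_len,
 *		.ulTagBits = 128
 *	};
 *	rv = gcm_init_ctx(gcm_ctx, (char *)&p, AES_BLOCK_LEN,
 *	    aes_encrypt_block, aes_copy_block, aes_xor_block);
 */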

int
gmac_init_ctx(gcm_ctx_t *gcm_ctx, char *param, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_GMAC_PARAMS *gmac_param;

	if (param != NULL) {
		gmac_param = (CK_AES_GMAC_PARAMS *)(void *)param;

		gcm_ctx->gcm_tag_len = CRYPTO_BITS2BYTES(AES_GMAC_TAG_BITS);
		gcm_ctx->gcm_processed_data_len = 0;

		/* these values are in bits */
		gcm_ctx->gcm_len_a_len_c[0]
		    = htonll(CRYPTO_BYTES2BITS(gmac_param->ulAADLen));

		rv = CRYPTO_SUCCESS;
		gcm_ctx->gcm_flags |= GMAC_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (gcm_init(gcm_ctx, gmac_param->pIv, AES_GMAC_IV_LEN,
	    gmac_param->pAAD, gmac_param->ulAADLen, block_size,
	    encrypt_block, copy_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
out:
	return (rv);
}

void *
gcm_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
		return (NULL);

	gcm_ctx->gcm_flags = GCM_MODE;
	return (gcm_ctx);
}

void *
gmac_alloc_ctx(int kmflag)
{
	gcm_ctx_t *gcm_ctx;

	if ((gcm_ctx = kmem_zalloc(sizeof (gcm_ctx_t), kmflag)) == NULL)
		return (NULL);

	gcm_ctx->gcm_flags = GMAC_MODE;
	return (gcm_ctx);
}

void
gcm_set_kmflag(gcm_ctx_t *ctx, int kmflag)
{
	ctx->gcm_kmflag = kmflag;
}


#ifdef __amd64

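/* CPUID leaf 1, ECX bit 1 indicates PCLMULQDQ support */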
#define INTEL_PCLMULQDQ_FLAG (1 << 1)

/*
 * Return 1 if executing on Intel with PCLMULQDQ instructions,
 * otherwise 0 (i.e., Intel without PCLMULQDQ or AMD64).
 * Cache the result, as the CPU can't change.
 *
 * Note: the userland version uses getisax(). The kernel version uses
 * is_x86_featureset().
 */
static int
intel_pclmulqdq_instruction_present(void)
{
	static int cached_result = -1;
	unsigned eax, ebx, ecx, edx;
	unsigned func, subfunc;

	if (cached_result == -1) { /* first time */
		/* check for an intel cpu */
		func = 0;
		subfunc = 0;

		__asm__ __volatile__(
		    "cpuid"
		    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		    : "a"(func), "c"(subfunc));

		if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
		    memcmp((char *) (&edx), "ineI", 4) == 0 &&
		    memcmp((char *) (&ecx), "ntel", 4) == 0) {

			func = 1;
			subfunc = 0;

			/* check for the pclmulqdq instruction */
			__asm__ __volatile__(
			    "cpuid"
			    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			    : "a"(func), "c"(subfunc));

			cached_result = !!(ecx & INTEL_PCLMULQDQ_FLAG);
		} else {
			cached_result = 0;
		}
	}

	return (cached_result);
}

#endif /* __amd64 */