/*
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	/* Program the key into both the GCM CPB and the GCA (AAD-only) CPB,
	 * since associated data is hashed by a separate coprocessor op. */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}
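/*
 * Per RFC 4106, the key material handed to setkey is the AES key followed
 * by a 4-byte salt (nonce) that is later prepended to the per-request IV,
 * so the real AES key length is key_len - 4.
 */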
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}
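/*
 * RFC 4106 only permits ICV (auth tag) lengths of 8, 12, or 16 bytes;
 * anything else is rejected here.
 */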
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
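/*
 * nx_gca() runs the associated data through the GCA (AAD hash) function
 * code, accumulating a partial authentication tag ("pat") across as many
 * hardware calls as the scatter/gather and data-length limits require.
 * The final pat is copied to *out, which the caller feeds to the GCM op
 * as in_pat_or_aad.
 */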
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* AAD that fits in one AES block needs no hardware hash pass; it is
	 * copied out directly for the GCM operation to consume. */
	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
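/*
 * gmac() handles the AAD-only case (cryptlen == 0 but assoclen != 0): the
 * hardware is switched to GMAC mode, which authenticates the associated
 * data without producing any ciphertext, so bit_length_data stays 0 and
 * all input bits are accounted as AAD.
 */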
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}
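/*
 * With no payload and no AAD, GHASH reduces to zero and the GCM tag is
 * simply E(K, J0): the block cipher applied to the initial counter block.
 * A single ECB encryption of the IV/counter therefore yields the tag.
 */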
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);
	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);
	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
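/*
 * For a 96-bit IV, GCM defines the initial counter block as
 * J0 = IV || 0^31 || 1. The write of 1 below lands that value in the low
 * word of the counter block (NX_GCM_CTR_OFFSET is presumably 12, the byte
 * offset of the 32-bit counter, and the store is native big-endian on
 * this platform).
 */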
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
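/*
 * The plain gcm(aes) entry points take the full 12-byte IV directly from
 * the request; the 32-bit counter portion is filled in by
 * gcm_aes_nx_crypt() above.
 */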
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}
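/*
 * For rfc4106(gcm(aes)) the 12-byte GCM nonce is built from the 4-byte
 * salt stored at setkey time followed by the 8-byte explicit IV carried
 * in each request, per RFC 4106.
 */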
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 0);
}
/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 12,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};
struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};