/*
 * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

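/*
 * Request-completion callback: the CCP hardware has already written the
 * result into the destination scatterlist, so no post-processing is
 * needed for GCM and only the status is passed through.
 */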
static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
{
	return ret;
}

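/*
 * Cache the AES key and map its length to the matching CCP key-type
 * enum.  The key is kept in the transform context and wrapped in a
 * single-entry scatterlist so it can be handed to the CCP engine later.
 */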
static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->u.aes.mode = CCP_AES_MODE_GCM;
	ctx->u.aes.key_len = key_len;

	memcpy(ctx->u.aes.key, key, key_len);
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return 0;
}

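/*
 * GCM permits tag lengths of 16 down to 12 bytes, plus 8 and 4 bytes
 * (NIST SP 800-38D); anything else is rejected.
 */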
static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
				   unsigned int authsize)
{
	switch (authsize) {
	case 16:
	case 15:
	case 14:
	case 13:
	case 12:
	case 8:
	case 4:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

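/*
 * Build a single CCP command for one AEAD request and queue it to the
 * hardware.  The same path serves encryption and decryption; only the
 * action flag differs.
 */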
static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;
	int i;
	int ret = 0;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
		return -EINVAL;

	if (!req->iv)
		return -EINVAL;

	/*
	 * 5 parts:
	 *   plaintext/ciphertext input
	 *   AAD
	 *   key
	 *   IV
	 *   destination + tag buffer
	 */

	/*
	 * Prepare the IV: the 12-byte nonce followed by a 32-bit
	 * big-endian counter initialized to 1 (GCM's initial counter
	 * block, J0).
	 */
	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	for (i = 0; i < 3; i++)
		rctx->iv[i + GCM_AES_IV_SIZE] = 0;
	rctx->iv[AES_BLOCK_SIZE - 1] = 1;

	/* Set up a scatterlist for the IV */
	iv_sg = &rctx->iv_sg;
	iv_len = AES_BLOCK_SIZE;
	sg_init_one(iv_sg, rctx->iv, iv_len);

	/* The AAD and the plaintext are concatenated in the src buffer */
	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action = encrypt;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = iv_sg;
	rctx->cmd.u.aes.iv_len = iv_len;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->cryptlen;
	rctx->cmd.u.aes.aad_len = req->assoclen;

	/* The ciphertext and the tag are in the dst buffer */
	rctx->cmd.u.aes.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_gcm_encrypt(struct aead_request *req)
{
	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT);
}

static int ccp_aes_gcm_decrypt(struct aead_request *req)
{
	return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT);
}

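/*
 * Per-transform setup: install the completion callback and reserve
 * room for the request context in every aead_request.
 */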
static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
{
	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->complete = ccp_aes_gcm_complete;
	ctx->u.aes.key_len = 0;

	crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
{
}

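/*
 * Template for the AEAD algorithms registered below; the per-algorithm
 * names and block size are filled in at registration time.
 */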
static struct aead_alg ccp_aes_gcm_defaults = {
	.setkey = ccp_aes_gcm_setkey,
	.setauthsize = ccp_aes_gcm_setauthsize,
	.encrypt = ccp_aes_gcm_encrypt,
	.decrypt = ccp_aes_gcm_decrypt,
	.init = ccp_aes_gcm_cra_init,
	.ivsize = GCM_AES_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.base = {
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct ccp_ctx),
		.cra_priority = CCP_CRA_PRIORITY,
		.cra_exit = ccp_aes_gcm_cra_exit,
		.cra_module = THIS_MODULE,
	},
};

struct ccp_aes_aead_def {
	enum ccp_aes_mode mode;
	unsigned int version;
	const char *name;
	const char *driver_name;
	unsigned int blocksize;
	unsigned int ivsize;
	struct aead_alg *alg_defaults;
};

static struct ccp_aes_aead_def aes_aead_algs[] = {
	{
		.mode = CCP_AES_MODE_GHASH,
		.version = CCP_VERSION(5, 0),
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-ccp",
		.blocksize = 1,
		.ivsize = AES_BLOCK_SIZE,
		.alg_defaults = &ccp_aes_gcm_defaults,
	},
};

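/*
 * Register one AEAD algorithm with the crypto API: copy the defaults,
 * fill in the per-algorithm names and block size, and track the
 * registration on the caller's list so it can be unregistered later.
 */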
static int ccp_register_aes_aead(struct list_head *head,
				 const struct ccp_aes_aead_def *def)
{
	struct ccp_crypto_aead *ccp_aead;
	struct aead_alg *alg;
	int ret;

	ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
	if (!ccp_aead)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_aead->entry);

	ccp_aead->mode = def->mode;

	/* Copy the defaults and override as necessary */
	alg = &ccp_aead->alg;
	*alg = *def->alg_defaults;
	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->driver_name);
	alg->base.cra_blocksize = def->blocksize;

	ret = crypto_register_aead(alg);
	if (ret) {
		pr_err("%s aead algorithm registration error (%d)\n",
		       alg->base.cra_name, ret);
		kfree(ccp_aead);
		return ret;
	}

	list_add(&ccp_aead->entry, head);

	return 0;
}

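/*
 * Register every AEAD algorithm the detected CCP hardware can support;
 * entries gated on a newer CCP version (GHASH needs v5.0) are skipped
 * on older devices.
 */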
int ccp_register_aes_aeads(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
		if (aes_aead_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
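
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * kernel caller might exercise the "gcm(aes)" transform registered
 * above, using the standard crypto AEAD API.  The function name and the
 * all-zero key, nonce, and buffer are hypothetical; a real caller would
 * use real data and fuller error handling.
 */
static int __maybe_unused ccp_gcm_usage_sketch(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 iv[GCM_AES_IV_SIZE] = { 0 };
	u8 *buf;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 16 bytes of plaintext plus room for the 16-byte tag */
	buf = kzalloc(32, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ret = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_free_buf;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_buf;
	}

	/* No AAD; encrypt 16 bytes in place, the tag lands after the data */
	sg_init_one(&sg, buf, 32);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);
	aead_request_set_crypt(req, &sg, &sg, 16, iv);

	/* Wait for the asynchronous request to complete */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}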