1// SPDX-License-Identifier: GPL-2.0
2/*
3 * K3 SA2UL crypto accelerator driver
4 *
5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Authors: Keerthy
8 * Vitaly Andrianov
9 * Tero Kristo
10 */
11#include <linux/clk.h>
12#include <linux/dmaengine.h>
13#include <linux/dmapool.h>
14#include <linux/module.h>
15#include <linux/of_device.h>
16#include <linux/platform_device.h>
17#include <linux/pm_runtime.h>
18
19#include <crypto/aes.h>
 20#include <crypto/authenc.h>
 21#include <crypto/des.h>
 22#include <crypto/internal/aead.h>
 23#include <crypto/internal/hash.h>
24#include <crypto/internal/skcipher.h>
25#include <crypto/scatterwalk.h>
 26#include <crypto/sha.h>
27
28#include "sa2ul.h"
29
30/* Byte offset for key in encryption security context */
31#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
32/* Byte offset for Aux-1 in encryption security context */
33#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
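/*
 * Layout implied by the two offsets above: byte 0 of the encryption
 * security context holds the crypto mode selector and bytes 1-27 the
 * mode control instructions; the key then starts at byte offset 32
 * (1 + 27 + 4) and the Aux-1 data at byte offset 64.
 */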
34
35#define SA_CMDL_UPD_ENC 0x0001
36#define SA_CMDL_UPD_AUTH 0x0002
37#define SA_CMDL_UPD_ENC_IV 0x0004
38#define SA_CMDL_UPD_AUTH_IV 0x0008
39#define SA_CMDL_UPD_AUX_KEY 0x0010
40
41#define SA_AUTH_SUBKEY_LEN 16
42#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
43#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
44
45#define MODE_CONTROL_BYTES 27
46#define SA_HASH_PROCESSING 0
47#define SA_CRYPTO_PROCESSING 0
48#define SA_UPLOAD_HASH_TO_TLR BIT(6)
49
50#define SA_SW0_FLAGS_MASK 0xF0000
51#define SA_SW0_CMDL_INFO_MASK 0x1F00000
52#define SA_SW0_CMDL_PRESENT BIT(4)
53#define SA_SW0_ENG_ID_MASK 0x3E000000
54#define SA_SW0_DEST_INFO_PRESENT BIT(30)
55#define SA_SW2_EGRESS_LENGTH 0xFF000000
56#define SA_BASIC_HASH 0x10
57
58#define SHA256_DIGEST_WORDS 8
59/* Make 32-bit word from 4 bytes */
60#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
61 ((b2) << 8) | (b3))
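/* Example: SA_MK_U32(0x01, 0x02, 0x03, 0x04) evaluates to 0x01020304 */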
62
63/* size of SCCTL structure in bytes */
64#define SA_SCCTL_SZ 16
65
66/* Max Authentication tag size */
67#define SA_MAX_AUTH_TAG_SZ 64
68
69#define PRIV_ID 0x1
70#define PRIV 0x1
71
72static struct device *sa_k3_dev;
73
74/**
75 * struct sa_cmdl_cfg - Command label configuration descriptor
 76 * @aalg: authentication algorithm ID
 77 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
 78 * @auth_eng_id: Authentication Engine ID
 79 * @iv_size: Initialization Vector size
80 * @akey: Authentication key
81 * @akey_len: Authentication key length
 82 * @enc: True, if this is an encode request
83 */
84struct sa_cmdl_cfg {
 85 int aalg;
 86 u8 enc_eng_id;
 87 u8 auth_eng_id;
 88 u8 iv_size;
89 const u8 *akey;
90 u16 akey_len;
 91 bool enc;
92};
93
94/**
95 * struct algo_data - Crypto algorithm specific data
96 * @enc_eng: Encryption engine info structure
97 * @auth_eng: Authentication engine info structure
98 * @auth_ctrl: Authentication control word
99 * @hash_size: Size of digest
100 * @iv_idx: iv index in psdata
101 * @iv_out_size: iv out size
102 * @ealg_id: Encryption Algorithm ID
 103 * @aalg_id: Authentication algorithm ID
104 * @mci_enc: Mode Control Instruction for Encryption algorithm
105 * @mci_dec: Mode Control Instruction for Decryption
106 * @inv_key: Whether the encryption algorithm demands key inversion
107 * @ctx: Pointer to the algorithm context
108 * @keyed_mac: Whether the authentication algorithm has key
109 * @prep_iopad: Function pointer to generate intermediate ipad/opad
110 */
111struct algo_data {
112 struct sa_eng_info enc_eng;
113 struct sa_eng_info auth_eng;
114 u8 auth_ctrl;
115 u8 hash_size;
116 u8 iv_idx;
117 u8 iv_out_size;
118 u8 ealg_id;
 119 u8 aalg_id;
120 u8 *mci_enc;
121 u8 *mci_dec;
122 bool inv_key;
123 struct sa_tfm_ctx *ctx;
124 bool keyed_mac;
125 void (*prep_iopad)(struct algo_data *algo, const u8 *key,
126 u16 key_sz, __be32 *ipad, __be32 *opad);
127};
128
129/**
130 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
131 * @type: Type of the crypto algorithm.
132 * @alg: Union of crypto algorithm definitions.
133 * @registered: Flag indicating if the crypto algorithm is already registered
134 */
135struct sa_alg_tmpl {
136 u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
137 union {
138 struct skcipher_alg skcipher;
 139 struct ahash_alg ahash;
 140 struct aead_alg aead;
141 } alg;
142 bool registered;
143};
144
145/**
 146 * struct sa_rx_data: RX Packet miscellaneous data placeholder
147 * @req: crypto request data pointer
148 * @ddev: pointer to the DMA device
149 * @tx_in: dma_async_tx_descriptor pointer for rx channel
150 * @split_src_sg: Set if the src sg is split and needs to be freed up
151 * @split_dst_sg: Set if the dst sg is split and needs to be freed up
152 * @enc: Flag indicating either encryption or decryption
153 * @enc_iv_size: Initialisation vector size
154 * @iv_idx: Initialisation vector index
155 * @rx_sg: Static scatterlist entry for overriding RX data
156 * @tx_sg: Static scatterlist entry for overriding TX data
157 * @src: Source data pointer
158 * @dst: Destination data pointer
159 */
160struct sa_rx_data {
161 void *req;
162 struct device *ddev;
163 struct dma_async_tx_descriptor *tx_in;
164 struct scatterlist *split_src_sg;
165 struct scatterlist *split_dst_sg;
166 u8 enc;
167 u8 enc_iv_size;
168 u8 iv_idx;
169 struct scatterlist rx_sg;
170 struct scatterlist tx_sg;
171 struct scatterlist *src;
172 struct scatterlist *dst;
173};
174
175/**
176 * struct sa_req: SA request definition
177 * @dev: device for the request
 178 * @size: total data to be transmitted via DMA
179 * @enc_offset: offset of cipher data
180 * @enc_size: data to be passed to cipher engine
181 * @enc_iv: cipher IV
182 * @auth_offset: offset of the authentication data
183 * @auth_size: size of the authentication data
184 * @auth_iv: authentication IV
185 * @type: algorithm type for the request
186 * @cmdl: command label pointer
187 * @base: pointer to the base request
188 * @ctx: pointer to the algorithm context data
189 * @enc: true if this is an encode request
190 * @src: source data
191 * @dst: destination data
192 * @callback: DMA callback for the request
193 * @mdata_size: metadata size passed to DMA
194 */
195struct sa_req {
196 struct device *dev;
197 u16 size;
198 u8 enc_offset;
199 u16 enc_size;
200 u8 *enc_iv;
201 u8 auth_offset;
202 u16 auth_size;
203 u8 *auth_iv;
204 u32 type;
205 u32 *cmdl;
206 struct crypto_async_request *base;
207 struct sa_tfm_ctx *ctx;
208 bool enc;
209 struct scatterlist *src;
210 struct scatterlist *dst;
211 dma_async_tx_callback callback;
212 u16 mdata_size;
213};
214
215/*
216 * Mode Control Instructions for various Key lengths 128, 192, 256
217 * For CBC (Cipher Block Chaining) mode for encryption
218 */
219static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
220 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
223 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
226 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
229};
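/*
 * The row of these per-key-length MCI tables is selected by the setkey
 * helpers below as key_idx = (keylen >> 3) - 2, i.e. 16-byte keys use
 * row 0, 24-byte keys row 1 and 32-byte keys row 2.
 */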
230
231/*
232 * Mode Control Instructions for various Key lengths 128, 192, 256
233 * For CBC (Cipher Block Chaining) mode for decryption
234 */
235static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
236 { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
238 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
239 { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
240 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
242 { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
245};
246
247/*
248 * Mode Control Instructions for various Key lengths 128, 192, 256
 249 * For CBC (Cipher Block Chaining) mode encryption, no-IV variant (used by the AEAD algorithms below)
250 */
251static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
252 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
255 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
258 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
261};
262
263/*
264 * Mode Control Instructions for various Key lengths 128, 192, 256
 265 * For CBC (Cipher Block Chaining) mode decryption, no-IV variant (used by the AEAD algorithms below)
266 */
267static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
268 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
271 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
273 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
274 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
275 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
277};
278
279/*
280 * Mode Control Instructions for various Key lengths 128, 192, 256
281 * For ECB (Electronic Code Book) mode for encryption
282 */
 283static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
284 { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
287 { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
290 { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
293};
294
295/*
296 * Mode Control Instructions for various Key lengths 128, 192, 256
297 * For ECB (Electronic Code Book) mode for decryption
298 */
 299static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
300 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
303 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
306 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
308 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
309};
310
311/*
 312 * Mode Control Instructions for the 3DES algorithm,
 313 * for CBC (Cipher Block Chaining) and ECB modes,
 314 * encryption and decryption respectively
315 */
316static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
317 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
319 0x00, 0x00, 0x00,
320};
321
322static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
323 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00,
326};
327
328static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
329 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
331 0x00, 0x00, 0x00,
332};
333
334static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
335 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
337 0x00, 0x00, 0x00,
338};
339
340/*
 341 * Perform 16-byte (128-bit) swizzling.
 342 * The SA2UL expects the security context in little-endian form and
 343 * the bus width is 128 bits (16 bytes), so swap 16 bytes at a time
 344 * from the higher to the lower address.
345 */
346static void sa_swiz_128(u8 *in, u16 len)
347{
348 u8 data[16];
349 int i, j;
350
351 for (i = 0; i < len; i += 16) {
352 memcpy(data, &in[i], 16);
353 for (j = 0; j < 16; j++)
354 in[i + j] = data[15 - j];
355 }
356}
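/*
 * Illustrative example: for len = 16, input bytes b0 b1 ... b15 are
 * rewritten in place as b15 b14 ... b0; longer buffers are handled as
 * consecutive independent 16-byte chunks.
 */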
357
 358/* Prepare the ipad and opad from the key, as per the HMAC algorithm step 1 */
359static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
360{
361 int i;
362
363 for (i = 0; i < key_sz; i++) {
364 k_ipad[i] = key[i] ^ 0x36;
365 k_opad[i] = key[i] ^ 0x5c;
366 }
367
 368 /* Remaining bytes: the key is zero-padded, so just store the pad constants */
369 for (; i < SHA1_BLOCK_SIZE; i++) {
370 k_ipad[i] = 0x36;
371 k_opad[i] = 0x5c;
372 }
373}
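/*
 * This is the standard HMAC key preparation (RFC 2104): the key is XORed
 * with the 0x36/0x5c pad constants and padded out to the 64-byte
 * SHA1/SHA256 block size.
 */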
374
375static void sa_export_shash(struct shash_desc *hash, int block_size,
376 int digest_size, __be32 *out)
377{
378 union {
379 struct sha1_state sha1;
380 struct sha256_state sha256;
381 struct sha512_state sha512;
382 } sha;
383 void *state;
384 u32 *result;
385 int i;
386
387 switch (digest_size) {
388 case SHA1_DIGEST_SIZE:
389 state = &sha.sha1;
390 result = sha.sha1.state;
391 break;
392 case SHA256_DIGEST_SIZE:
393 state = &sha.sha256;
394 result = sha.sha256.state;
395 break;
396 default:
397 dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
398 digest_size);
399 return;
400 }
401
402 crypto_shash_export(hash, state);
403
404 for (i = 0; i < digest_size >> 2; i++)
405 out[i] = cpu_to_be32(result[i]);
406}
407
408static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
409 u16 key_sz, __be32 *ipad, __be32 *opad)
410{
411 SHASH_DESC_ON_STACK(shash, data->ctx->shash);
412 int block_size = crypto_shash_blocksize(data->ctx->shash);
413 int digest_size = crypto_shash_digestsize(data->ctx->shash);
414 u8 k_ipad[SHA1_BLOCK_SIZE];
415 u8 k_opad[SHA1_BLOCK_SIZE];
416
417 shash->tfm = data->ctx->shash;
418
419 prepare_kiopad(k_ipad, k_opad, key, key_sz);
420
421 memzero_explicit(ipad, block_size);
422 memzero_explicit(opad, block_size);
423
424 crypto_shash_init(shash);
425 crypto_shash_update(shash, k_ipad, block_size);
426 sa_export_shash(shash, block_size, digest_size, ipad);
427
428 crypto_shash_init(shash);
429 crypto_shash_update(shash, k_opad, block_size);
430
431 sa_export_shash(shash, block_size, digest_size, opad);
432}
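/*
 * Note that the hardware is not given the raw authentication key: the
 * intermediate digests of (key ^ ipad) and (key ^ opad) are computed here
 * with the software shash, exported as partial hash state and stored
 * big-endian in the security context.
 */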
433
434/* Derive the inverse key used in AES-CBC decryption operation */
435static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
436{
437 struct crypto_aes_ctx ctx;
438 int key_pos;
439
440 if (aes_expandkey(&ctx, key, key_sz)) {
441 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
442 return -EINVAL;
443 }
444
 445 /* Workaround to get the right inverse for AES_KEYSIZE_192 keys */
446 if (key_sz == AES_KEYSIZE_192) {
447 ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
448 ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
449 }
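	/*
	 * The decryption side needs the final round keys of the expanded
	 * schedule: e.g. AES-128 expands to 44 32-bit words and the copy
	 * below takes words 40-43, AES-256 expands to 60 words and words
	 * 52-59 are taken. AES-192 expands to only 52 words, hence the two
	 * extra words synthesized above.
	 */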
450
 451 /* Based on crypto_aes_expand_key logic */
452 switch (key_sz) {
453 case AES_KEYSIZE_128:
454 case AES_KEYSIZE_192:
455 key_pos = key_sz + 24;
456 break;
457
458 case AES_KEYSIZE_256:
459 key_pos = key_sz + 24 - 4;
460 break;
461
462 default:
463 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
464 return -EINVAL;
465 }
466
467 memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
468 return 0;
469}
470
471/* Set Security context for the encryption engine */
472static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
473 u8 enc, u8 *sc_buf)
474{
475 const u8 *mci = NULL;
476
477 /* Set Encryption mode selector to crypto processing */
478 sc_buf[0] = SA_CRYPTO_PROCESSING;
479
480 if (enc)
481 mci = ad->mci_enc;
482 else
483 mci = ad->mci_dec;
484 /* Set the mode control instructions in security context */
485 if (mci)
486 memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
487
488 /* For AES-CBC decryption get the inverse key */
489 if (ad->inv_key && !enc) {
490 if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
491 return -EINVAL;
 492 /* For all other cases: the key is used as-is */
493 } else {
494 memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
495 }
496
497 return 0;
498}
499
500/* Set Security context for the authentication engine */
501static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
502 u8 *sc_buf)
503{
504 __be32 ipad[64], opad[64];
505
506 /* Set Authentication mode selector to hash processing */
507 sc_buf[0] = SA_HASH_PROCESSING;
508 /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
509 sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
510 sc_buf[1] |= ad->auth_ctrl;
511
512 /* Copy the keys or ipad/opad */
513 if (ad->keyed_mac) {
514 ad->prep_iopad(ad, key, key_sz, ipad, opad);
515
516 /* Copy ipad to AuthKey */
517 memcpy(&sc_buf[32], ipad, ad->hash_size);
518 /* Copy opad to Aux-1 */
519 memcpy(&sc_buf[64], opad, ad->hash_size);
520 } else {
521 /* basic hash */
522 sc_buf[1] |= SA_BASIC_HASH;
523 }
524}
525
526static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
527{
528 int j;
529
530 for (j = 0; j < ((size16) ? 4 : 2); j++) {
531 *out = cpu_to_be32(*((u32 *)iv));
532 iv += 4;
533 out++;
534 }
535}
536
537/* Format general command label */
538static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
539 struct sa_cmdl_upd_info *upd_info)
540{
 541 u8 enc_offset = 0, auth_offset = 0, total = 0;
 542 u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
 543 u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
544 u32 *word_ptr = (u32 *)cmdl;
545 int i;
546
547 /* Clear the command label */
548 memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
549
 550 /* Initialize the command update structure */
551 memzero_explicit(upd_info, sizeof(*upd_info));
552
553 if (cfg->enc_eng_id && cfg->auth_eng_id) {
554 if (cfg->enc) {
555 auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
556 enc_next_eng = cfg->auth_eng_id;
 557
558 if (cfg->iv_size)
559 auth_offset += cfg->iv_size;
560 } else {
561 enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
562 auth_next_eng = cfg->enc_eng_id;
563 }
564 }
 565
 566 if (cfg->enc_eng_id) {
567 upd_info->flags |= SA_CMDL_UPD_ENC;
568 upd_info->enc_size.index = enc_offset >> 2;
569 upd_info->enc_offset.index = upd_info->enc_size.index + 1;
570 /* Encryption command label */
571 cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
572
573 /* Encryption modes requiring IV */
574 if (cfg->iv_size) {
575 upd_info->flags |= SA_CMDL_UPD_ENC_IV;
576 upd_info->enc_iv.index =
577 (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
578 upd_info->enc_iv.size = cfg->iv_size;
579
580 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
581 SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
582
583 cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
584 (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
 585 total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
586 } else {
587 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
588 SA_CMDL_HEADER_SIZE_BYTES;
 589 total += SA_CMDL_HEADER_SIZE_BYTES;
590 }
591 }
592
593 if (cfg->auth_eng_id) {
594 upd_info->flags |= SA_CMDL_UPD_AUTH;
595 upd_info->auth_size.index = auth_offset >> 2;
596 upd_info->auth_offset.index = upd_info->auth_size.index + 1;
597 cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
598 cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
599 SA_CMDL_HEADER_SIZE_BYTES;
600 total += SA_CMDL_HEADER_SIZE_BYTES;
601 }
602
603 total = roundup(total, 8);
604
605 for (i = 0; i < total / 4; i++)
606 word_ptr[i] = swab32(word_ptr[i]);
607
608 return total;
609}
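/*
 * Rough shape of the resulting command label: one SA_CMDL_HEADER_SIZE_BYTES
 * header per engine pass (encryption and/or authentication), with the cipher
 * IV appended to the encryption header when the mode needs one. The total is
 * rounded up to 8 bytes and byte-swapped into 32-bit words for the hardware.
 */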
610
611/* Update Command label */
612static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
613 struct sa_cmdl_upd_info *upd_info)
614{
615 int i = 0, j;
616
617 if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
618 cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
619 cmdl[upd_info->enc_size.index] |= req->enc_size;
620 cmdl[upd_info->enc_offset.index] &=
621 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
622 cmdl[upd_info->enc_offset.index] |=
623 ((u32)req->enc_offset <<
624 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
625
626 if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
627 __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
628 u32 *enc_iv = (u32 *)req->enc_iv;
629
630 for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
631 data[j] = cpu_to_be32(*enc_iv);
632 enc_iv++;
633 }
634 }
635 }
636
637 if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
638 cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
639 cmdl[upd_info->auth_size.index] |= req->auth_size;
640 cmdl[upd_info->auth_offset.index] &=
641 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
642 cmdl[upd_info->auth_offset.index] |=
643 ((u32)req->auth_offset <<
644 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
645 if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
646 sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
647 req->auth_iv,
648 (upd_info->auth_iv.size > 8));
649 }
650 if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
651 int offset = (req->auth_size & 0xF) ? 4 : 0;
652
653 memcpy(&cmdl[upd_info->aux_key_info.index],
654 &upd_info->aux_key[offset], 16);
655 }
656 }
657}
658
659/* Format SWINFO words to be sent to SA */
660static
661void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
662 u8 cmdl_present, u8 cmdl_offset, u8 flags,
663 u8 hash_size, u32 *swinfo)
664{
665 swinfo[0] = sc_id;
666 swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
667 if (likely(cmdl_present))
668 swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
669 __ffs(SA_SW0_CMDL_INFO_MASK));
670 swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
671
672 swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
673 swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
674 swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
675 swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
676}
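/*
 * SW0 bit packing, as derived from the masks above: bits 15-0 carry the
 * security context ID, bits 19-16 the flags, bits 24-20 the command label
 * info, bits 29-25 the engine ID and bit 30 flags the destination info as
 * present. SW1/SW2 hold the low/high words of the context DMA address, with
 * the egress (hash) length in the top byte of SW2.
 */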
677
678/* Dump the security context */
679static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
680{
681#ifdef DEBUG
682 dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
683 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
684 16, 1, buf, SA_CTX_MAX_SZ, false);
685#endif
686}
687
688static
689int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
690 u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
691 struct algo_data *ad, u8 enc, u32 *swinfo)
692{
693 int enc_sc_offset = 0;
 694 int auth_sc_offset = 0;
695 u8 *sc_buf = ctx->sc;
696 u16 sc_id = ctx->sc_id;
 697 u8 first_engine = 0;
698
699 memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
700
701 if (ad->auth_eng.eng_id) {
702 if (enc)
703 first_engine = ad->enc_eng.eng_id;
704 else
705 first_engine = ad->auth_eng.eng_id;
706
707 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
708 auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
709 sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
710 if (!ad->hash_size)
711 return -EINVAL;
712 ad->hash_size = roundup(ad->hash_size, 8);
713
714 } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
715 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
716 first_engine = ad->enc_eng.eng_id;
717 sc_buf[1] = SA_SCCTL_FE_ENC;
718 ad->hash_size = ad->iv_out_size;
 719 }
720
721 /* SCCTL Owner info: 0=host, 1=CP_ACE */
722 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
723 memcpy(&sc_buf[2], &sc_id, 2);
724 sc_buf[4] = 0x0;
725 sc_buf[5] = PRIV_ID;
726 sc_buf[6] = PRIV;
727 sc_buf[7] = 0x0;
728
729 /* Prepare context for encryption engine */
730 if (ad->enc_eng.sc_size) {
731 if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
732 &sc_buf[enc_sc_offset]))
733 return -EINVAL;
734 }
735
736 /* Prepare context for authentication engine */
737 if (ad->auth_eng.sc_size)
738 sa_set_sc_auth(ad, auth_key, auth_key_sz,
739 &sc_buf[auth_sc_offset]);
740
741 /* Set the ownership of context to CP_ACE */
742 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
743
744 /* swizzle the security context */
745 sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
746
747 sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
 748 SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
749
750 sa_dump_sc(sc_buf, ctx->sc_phys);
751
752 return 0;
753}
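/*
 * Summary of the context built above: the PHP/SCCTL area fills the first
 * SA_CTX_PHP_PE_CTX_SZ bytes, followed by the encryption engine context and,
 * when an authentication engine is used, the authentication context. The
 * owner byte is flipped to CP_ACE (0x80) only once the contents are final,
 * and the whole buffer is then 128-bit swizzled for the hardware.
 */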
754
 755/* Free the per-direction context memory */
756static void sa_free_ctx_info(struct sa_ctx_info *ctx,
757 struct sa_crypto_data *data)
758{
759 unsigned long bn;
760
761 bn = ctx->sc_id - data->sc_id_start;
762 spin_lock(&data->scid_lock);
763 __clear_bit(bn, data->ctx_bm);
764 data->sc_id--;
765 spin_unlock(&data->scid_lock);
766
767 if (ctx->sc) {
768 dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
769 ctx->sc = NULL;
770 }
771}
772
773static int sa_init_ctx_info(struct sa_ctx_info *ctx,
774 struct sa_crypto_data *data)
775{
776 unsigned long bn;
777 int err;
778
779 spin_lock(&data->scid_lock);
780 bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
781 __set_bit(bn, data->ctx_bm);
782 data->sc_id++;
783 spin_unlock(&data->scid_lock);
784
785 ctx->sc_id = (u16)(data->sc_id_start + bn);
786
787 ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
788 if (!ctx->sc) {
789 dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
790 err = -ENOMEM;
791 goto scid_rollback;
792 }
793
794 return 0;
795
796scid_rollback:
797 spin_lock(&data->scid_lock);
798 __clear_bit(bn, data->ctx_bm);
799 data->sc_id--;
800 spin_unlock(&data->scid_lock);
801
802 return err;
803}
804
805static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
806{
807 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
808 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
809
810 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
811 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
812 ctx->dec.sc_id, &ctx->dec.sc_phys);
813
814 sa_free_ctx_info(&ctx->enc, data);
815 sa_free_ctx_info(&ctx->dec, data);
816
817 crypto_free_sync_skcipher(ctx->fallback.skcipher);
818}
819
820static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
821{
822 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
823 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
824 const char *name = crypto_tfm_alg_name(&tfm->base);
825 int ret;
826
827 memzero_explicit(ctx, sizeof(*ctx));
828 ctx->dev_data = data;
829
830 ret = sa_init_ctx_info(&ctx->enc, data);
831 if (ret)
832 return ret;
833 ret = sa_init_ctx_info(&ctx->dec, data);
834 if (ret) {
835 sa_free_ctx_info(&ctx->enc, data);
836 return ret;
837 }
838
839 ctx->fallback.skcipher =
840 crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
841
842 if (IS_ERR(ctx->fallback.skcipher)) {
843 dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
844 return PTR_ERR(ctx->fallback.skcipher);
845 }
846
847 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
848 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
849 ctx->dec.sc_id, &ctx->dec.sc_phys);
850 return 0;
851}
852
853static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
854 unsigned int keylen, struct algo_data *ad)
855{
856 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
857 int cmdl_len;
858 struct sa_cmdl_cfg cfg;
859 int ret;
860
861 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
862 keylen != AES_KEYSIZE_256)
863 return -EINVAL;
864
865 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
866 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
867
868 memzero_explicit(&cfg, sizeof(cfg));
869 cfg.enc_eng_id = ad->enc_eng.eng_id;
870 cfg.iv_size = crypto_skcipher_ivsize(tfm);
871
872 crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher,
873 CRYPTO_TFM_REQ_MASK);
874 crypto_sync_skcipher_set_flags(ctx->fallback.skcipher,
875 tfm->base.crt_flags &
876 CRYPTO_TFM_REQ_MASK);
877 ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen);
878 if (ret)
879 return ret;
880
881 /* Setup Encryption Security Context & Command label template */
882 if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
883 &ctx->enc.epib[1]))
884 goto badkey;
885
886 cmdl_len = sa_format_cmdl_gen(&cfg,
887 (u8 *)ctx->enc.cmdl,
888 &ctx->enc.cmdl_upd_info);
889 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
890 goto badkey;
891
892 ctx->enc.cmdl_size = cmdl_len;
893
894 /* Setup Decryption Security Context & Command label template */
895 if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
896 &ctx->dec.epib[1]))
897 goto badkey;
898
899 cfg.enc_eng_id = ad->enc_eng.eng_id;
900 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
901 &ctx->dec.cmdl_upd_info);
902
903 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
904 goto badkey;
905
906 ctx->dec.cmdl_size = cmdl_len;
907 ctx->iv_idx = ad->iv_idx;
908
909 return 0;
910
911badkey:
912 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
913 return -EINVAL;
914}
915
916static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
917 unsigned int keylen)
918{
919 struct algo_data ad = { 0 };
920 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
921 int key_idx = (keylen >> 3) - 2;
922
923 if (key_idx >= 3)
924 return -EINVAL;
925
926 ad.mci_enc = mci_cbc_enc_array[key_idx];
927 ad.mci_dec = mci_cbc_dec_array[key_idx];
928 ad.inv_key = true;
929 ad.ealg_id = SA_EALG_ID_AES_CBC;
930 ad.iv_idx = 4;
931 ad.iv_out_size = 16;
932
933 return sa_cipher_setkey(tfm, key, keylen, &ad);
934}
935
936static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
937 unsigned int keylen)
938{
939 struct algo_data ad = { 0 };
940 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
941 int key_idx = (keylen >> 3) - 2;
942
943 if (key_idx >= 3)
944 return -EINVAL;
945
946 ad.mci_enc = mci_ecb_enc_array[key_idx];
947 ad.mci_dec = mci_ecb_dec_array[key_idx];
948 ad.inv_key = true;
949 ad.ealg_id = SA_EALG_ID_AES_ECB;
950
951 return sa_cipher_setkey(tfm, key, keylen, &ad);
952}
953
954static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
955 unsigned int keylen)
956{
957 struct algo_data ad = { 0 };
958
959 ad.mci_enc = mci_cbc_3des_enc_array;
960 ad.mci_dec = mci_cbc_3des_dec_array;
961 ad.ealg_id = SA_EALG_ID_3DES_CBC;
962 ad.iv_idx = 6;
963 ad.iv_out_size = 8;
964
965 return sa_cipher_setkey(tfm, key, keylen, &ad);
966}
967
968static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
969 unsigned int keylen)
970{
971 struct algo_data ad = { 0 };
972
973 ad.mci_enc = mci_ecb_3des_enc_array;
974 ad.mci_dec = mci_ecb_3des_dec_array;
975
976 return sa_cipher_setkey(tfm, key, keylen, &ad);
977}
978
979static void sa_aes_dma_in_callback(void *data)
980{
981 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
982 struct skcipher_request *req;
983 int sglen;
984 u32 *result;
985 __be32 *mdptr;
986 size_t ml, pl;
987 int i;
988 enum dma_data_direction dir_src;
989 bool diff_dst;
990
991 req = container_of(rxd->req, struct skcipher_request, base);
992 sglen = sg_nents_for_len(req->src, req->cryptlen);
993
994 diff_dst = (req->src != req->dst) ? true : false;
995 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
996
997 if (req->iv) {
998 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
999 &ml);
1000 result = (u32 *)req->iv;
1001
1002 for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1003 result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1004 }
1005
1006 dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
1007 kfree(rxd->split_src_sg);
1008
1009 if (diff_dst) {
1010 sglen = sg_nents_for_len(req->dst, req->cryptlen);
1011
1012 dma_unmap_sg(rxd->ddev, req->dst, sglen,
1013 DMA_FROM_DEVICE);
1014 kfree(rxd->split_dst_sg);
1015 }
1016
1017 kfree(rxd);
1018
1019 skcipher_request_complete(req, 0);
1020}
1021
1022static void
1023sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1024{
1025 u32 *out, *in;
1026 int i;
1027
1028 for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1029 *out++ = *in++;
1030
1031 mdptr[4] = (0xFFFF << 16);
1032 for (out = &mdptr[5], in = psdata, i = 0;
1033 i < pslen / sizeof(u32); i++)
1034 *out++ = *in++;
1035}
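/*
 * Metadata layout produced above: the EPIB (software info) words come
 * first, word 4 carries the 0xFFFF marker in its upper half and the
 * command label (PS data) follows from word 5 onwards.
 */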
1036
1037static int sa_run(struct sa_req *req)
1038{
1039 struct sa_rx_data *rxd;
1040 gfp_t gfp_flags;
1041 u32 cmdl[SA_MAX_CMDL_WORDS];
1042 struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1043 struct device *ddev;
1044 struct dma_chan *dma_rx;
1045 int sg_nents, src_nents, dst_nents;
1046 int mapped_src_nents, mapped_dst_nents;
1047 struct scatterlist *src, *dst;
1048 size_t pl, ml, split_size;
1049 struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1050 int ret;
1051 struct dma_async_tx_descriptor *tx_out;
1052 u32 *mdptr;
1053 bool diff_dst;
1054 enum dma_data_direction dir_src;
1055
1056 gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1057 GFP_KERNEL : GFP_ATOMIC;
1058
1059 rxd = kzalloc(sizeof(*rxd), gfp_flags);
1060 if (!rxd)
1061 return -ENOMEM;
1062
1063 if (req->src != req->dst) {
1064 diff_dst = true;
1065 dir_src = DMA_TO_DEVICE;
1066 } else {
1067 diff_dst = false;
1068 dir_src = DMA_BIDIRECTIONAL;
1069 }
1070
1071 /*
1072 * SA2UL has an interesting feature where the receive DMA channel
 1073 * is selected based on the amount of data passed to the engine. Within the
1074 * transition range, there is also a space where it is impossible
1075 * to determine where the data will end up, and this should be
1076 * avoided. This will be handled by the SW fallback mechanism by
1077 * the individual algorithm implementations.
1078 */
1079 if (req->size >= 256)
1080 dma_rx = pdata->dma_rx2;
1081 else
1082 dma_rx = pdata->dma_rx1;
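	/*
	 * Sizes inside the ambiguous window mentioned above never reach
	 * this point: they are diverted to the software fallback by the
	 * SA_UNSAFE_DATA_SZ_MIN/MAX checks in the individual algorithm
	 * entry points.
	 */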
1083
1084 ddev = dma_rx->device->dev;
1085
1086 memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1087
1088 sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1089
1090 if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1091 if (req->enc)
1092 req->type |=
1093 (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1094 else
1095 req->type |=
1096 (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1097 }
1098
1099 cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1100
1101 /*
1102 * Map the packets, first we check if the data fits into a single
1103 * sg entry and use that if possible. If it does not fit, we check
1104 * if we need to do sg_split to align the scatterlist data on the
1105 * actual data size being processed by the crypto engine.
1106 */
1107 src = req->src;
1108 sg_nents = sg_nents_for_len(src, req->size);
1109
1110 split_size = req->size;
1111
1112 if (sg_nents == 1 && split_size <= req->src->length) {
1113 src = &rxd->rx_sg;
1114 sg_init_table(src, 1);
1115 sg_set_page(src, sg_page(req->src), split_size,
1116 req->src->offset);
1117 src_nents = 1;
1118 dma_map_sg(ddev, src, sg_nents, dir_src);
1119 } else {
1120 mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
1121 dir_src);
1122 ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
1123 &src, &src_nents, gfp_flags);
1124 if (ret) {
1125 src_nents = sg_nents;
1126 src = req->src;
1127 } else {
1128 rxd->split_src_sg = src;
1129 }
1130 }
1131
1132 if (!diff_dst) {
1133 dst_nents = src_nents;
1134 dst = src;
1135 } else {
1136 dst_nents = sg_nents_for_len(req->dst, req->size);
1137
1138 if (dst_nents == 1 && split_size <= req->dst->length) {
1139 dst = &rxd->tx_sg;
1140 sg_init_table(dst, 1);
1141 sg_set_page(dst, sg_page(req->dst), split_size,
1142 req->dst->offset);
1143 dst_nents = 1;
1144 dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
1145 } else {
1146 mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
1147 DMA_FROM_DEVICE);
1148 ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
1149 &split_size, &dst, &dst_nents,
1150 gfp_flags);
1151 if (ret) {
 1152 /* keep dst_nents as computed above */
1153 dst = req->dst;
1154 } else {
1155 rxd->split_dst_sg = dst;
1156 }
1157 }
1158 }
1159
1160 if (unlikely(src_nents != sg_nents)) {
1161 dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
1162 ret = -EIO;
1163 goto err_cleanup;
1164 }
1165
1166 rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1167 DMA_DEV_TO_MEM,
1168 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1169 if (!rxd->tx_in) {
1170 dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1171 ret = -EINVAL;
1172 goto err_cleanup;
1173 }
1174
1175 rxd->req = (void *)req->base;
1176 rxd->enc = req->enc;
1177 rxd->ddev = ddev;
1178 rxd->src = src;
1179 rxd->dst = dst;
1180 rxd->iv_idx = req->ctx->iv_idx;
1181 rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1182 rxd->tx_in->callback = req->callback;
1183 rxd->tx_in->callback_param = rxd;
1184
1185 tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1186 src_nents, DMA_MEM_TO_DEV,
1187 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1188
1189 if (!tx_out) {
1190 dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1191 ret = -EINVAL;
1192 goto err_cleanup;
1193 }
1194
1195 /*
1196 * Prepare metadata for DMA engine. This essentially describes the
1197 * crypto algorithm to be used, data sizes, different keys etc.
1198 */
1199 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1200
1201 sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1202 sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1203 sa_ctx->epib);
1204
1205 ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1206 dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1207
1208 dmaengine_submit(tx_out);
1209 dmaengine_submit(rxd->tx_in);
1210
1211 dma_async_issue_pending(dma_rx);
1212 dma_async_issue_pending(pdata->dma_tx);
1213
1214 return -EINPROGRESS;
1215
1216err_cleanup:
 1217 dma_unmap_sg(ddev, req->src, sg_nents, dir_src);
1218 kfree(rxd->split_src_sg);
1219
1220 if (req->src != req->dst) {
1221 dst_nents = sg_nents_for_len(req->dst, req->size);
1222 dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
1223 kfree(rxd->split_dst_sg);
1224 }
1225
1226 kfree(rxd);
1227
1228 return ret;
1229}
1230
1231static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1232{
1233 struct sa_tfm_ctx *ctx =
1234 crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1235 struct crypto_alg *alg = req->base.tfm->__crt_alg;
1236 struct sa_req sa_req = { 0 };
1237 int ret;
1238
1239 if (!req->cryptlen)
1240 return 0;
1241
1242 if (req->cryptlen % alg->cra_blocksize)
1243 return -EINVAL;
1244
1245 /* Use SW fallback if the data size is not supported */
1246 if (req->cryptlen > SA_MAX_DATA_SZ ||
1247 (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1248 req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1249 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher);
1250
1251 skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher);
1252 skcipher_request_set_callback(subreq, req->base.flags,
1253 NULL, NULL);
1254 skcipher_request_set_crypt(subreq, req->src, req->dst,
1255 req->cryptlen, req->iv);
1256 if (enc)
1257 ret = crypto_skcipher_encrypt(subreq);
1258 else
1259 ret = crypto_skcipher_decrypt(subreq);
1260
1261 skcipher_request_zero(subreq);
1262 return ret;
1263 }
1264
1265 sa_req.size = req->cryptlen;
1266 sa_req.enc_size = req->cryptlen;
1267 sa_req.src = req->src;
1268 sa_req.dst = req->dst;
1269 sa_req.enc_iv = iv;
1270 sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1271 sa_req.enc = enc;
1272 sa_req.callback = sa_aes_dma_in_callback;
1273 sa_req.mdata_size = 44;
1274 sa_req.base = &req->base;
1275 sa_req.ctx = ctx;
1276
1277 return sa_run(&sa_req);
1278}
1279
1280static int sa_encrypt(struct skcipher_request *req)
1281{
1282 return sa_cipher_run(req, req->iv, 1);
1283}
1284
1285static int sa_decrypt(struct skcipher_request *req)
1286{
1287 return sa_cipher_run(req, req->iv, 0);
1288}
1289
1290static void sa_sha_dma_in_callback(void *data)
1291{
1292 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1293 struct ahash_request *req;
1294 struct crypto_ahash *tfm;
1295 unsigned int authsize;
1296 int i, sg_nents;
1297 size_t ml, pl;
1298 u32 *result;
1299 __be32 *mdptr;
1300
1301 req = container_of(rxd->req, struct ahash_request, base);
1302 tfm = crypto_ahash_reqtfm(req);
1303 authsize = crypto_ahash_digestsize(tfm);
1304
1305 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1306 result = (u32 *)req->result;
1307
1308 for (i = 0; i < (authsize / 4); i++)
1309 result[i] = be32_to_cpu(mdptr[i + 4]);
1310
1311 sg_nents = sg_nents_for_len(req->src, req->nbytes);
1312 dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
1313
1314 kfree(rxd->split_src_sg);
1315
1316 kfree(rxd);
1317
1318 ahash_request_complete(req, 0);
1319}
1320
1321static int zero_message_process(struct ahash_request *req)
1322{
1323 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1324 int sa_digest_size = crypto_ahash_digestsize(tfm);
1325
1326 switch (sa_digest_size) {
1327 case SHA1_DIGEST_SIZE:
1328 memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1329 break;
1330 case SHA256_DIGEST_SIZE:
1331 memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1332 break;
1333 case SHA512_DIGEST_SIZE:
1334 memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1335 break;
1336 default:
1337 return -EINVAL;
1338 }
1339
1340 return 0;
1341}
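/*
 * Zero-length requests never touch the hardware; the precomputed
 * zero-message digests exported by the generic SHA code are returned
 * directly.
 */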
1342
1343static int sa_sha_run(struct ahash_request *req)
1344{
1345 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1346 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1347 struct sa_req sa_req = { 0 };
1348 size_t auth_len;
1349
1350 auth_len = req->nbytes;
1351
1352 if (!auth_len)
1353 return zero_message_process(req);
1354
1355 if (auth_len > SA_MAX_DATA_SZ ||
1356 (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1357 auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1358 struct ahash_request *subreq = &rctx->fallback_req;
1359 int ret = 0;
1360
1361 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1362 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1363
1364 crypto_ahash_init(subreq);
1365
1366 subreq->nbytes = auth_len;
1367 subreq->src = req->src;
1368 subreq->result = req->result;
1369
1370 ret |= crypto_ahash_update(subreq);
1371
1372 subreq->nbytes = 0;
1373
1374 ret |= crypto_ahash_final(subreq);
1375
1376 return ret;
1377 }
1378
1379 sa_req.size = auth_len;
1380 sa_req.auth_size = auth_len;
1381 sa_req.src = req->src;
1382 sa_req.dst = req->src;
1383 sa_req.enc = true;
1384 sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1385 sa_req.callback = sa_sha_dma_in_callback;
1386 sa_req.mdata_size = 28;
1387 sa_req.ctx = ctx;
1388 sa_req.base = &req->base;
1389
1390 return sa_run(&sa_req);
1391}
1392
1393static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1394{
1395 int bs = crypto_shash_blocksize(ctx->shash);
1396 int cmdl_len;
1397 struct sa_cmdl_cfg cfg;
1398
1399 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1400 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1401 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1402
1403 memset(ctx->authkey, 0, bs);
1404 memset(&cfg, 0, sizeof(cfg));
1405 cfg.aalg = ad->aalg_id;
1406 cfg.enc_eng_id = ad->enc_eng.eng_id;
1407 cfg.auth_eng_id = ad->auth_eng.eng_id;
1408 cfg.iv_size = 0;
1409 cfg.akey = NULL;
1410 cfg.akey_len = 0;
1411
1412 /* Setup Encryption Security Context & Command label template */
1413 if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
1414 &ctx->enc.epib[1]))
1415 goto badkey;
1416
1417 cmdl_len = sa_format_cmdl_gen(&cfg,
1418 (u8 *)ctx->enc.cmdl,
1419 &ctx->enc.cmdl_upd_info);
1420 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1421 goto badkey;
1422
1423 ctx->enc.cmdl_size = cmdl_len;
1424
1425 return 0;
1426
1427badkey:
1428 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1429 return -EINVAL;
1430}
1431
1432static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1433{
1434 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1435 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1436 int ret;
1437
1438 memset(ctx, 0, sizeof(*ctx));
1439 ctx->dev_data = data;
1440 ret = sa_init_ctx_info(&ctx->enc, data);
1441 if (ret)
1442 return ret;
1443
1444 if (alg_base) {
1445 ctx->shash = crypto_alloc_shash(alg_base, 0,
1446 CRYPTO_ALG_NEED_FALLBACK);
1447 if (IS_ERR(ctx->shash)) {
1448 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1449 alg_base);
1450 return PTR_ERR(ctx->shash);
1451 }
1452 /* for fallback */
1453 ctx->fallback.ahash =
1454 crypto_alloc_ahash(alg_base, 0,
1455 CRYPTO_ALG_NEED_FALLBACK);
1456 if (IS_ERR(ctx->fallback.ahash)) {
1457 dev_err(ctx->dev_data->dev,
1458 "Could not load fallback driver\n");
1459 return PTR_ERR(ctx->fallback.ahash);
1460 }
1461 }
1462
1463 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1464 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1465 ctx->dec.sc_id, &ctx->dec.sc_phys);
1466
1467 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1468 sizeof(struct sa_sha_req_ctx) +
1469 crypto_ahash_reqsize(ctx->fallback.ahash));
1470
1471 return 0;
1472}
1473
1474static int sa_sha_digest(struct ahash_request *req)
1475{
1476 return sa_sha_run(req);
1477}
1478
1479static int sa_sha_init(struct ahash_request *req)
1480{
1481 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1482 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1483 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1484
1485 dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n",
1486 crypto_ahash_digestsize(tfm), (u64)rctx);
1487
1488 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1489 rctx->fallback_req.base.flags =
1490 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1491
1492 return crypto_ahash_init(&rctx->fallback_req);
1493}
1494
1495static int sa_sha_update(struct ahash_request *req)
1496{
1497 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1498 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1499 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1500
1501 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1502 rctx->fallback_req.base.flags =
1503 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1504 rctx->fallback_req.nbytes = req->nbytes;
1505 rctx->fallback_req.src = req->src;
1506
1507 return crypto_ahash_update(&rctx->fallback_req);
1508}
1509
1510static int sa_sha_final(struct ahash_request *req)
1511{
1512 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1513 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1514 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1515
1516 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1517 rctx->fallback_req.base.flags =
1518 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1519 rctx->fallback_req.result = req->result;
1520
1521 return crypto_ahash_final(&rctx->fallback_req);
1522}
1523
1524static int sa_sha_finup(struct ahash_request *req)
1525{
1526 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1527 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1528 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1529
1530 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1531 rctx->fallback_req.base.flags =
1532 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1533
1534 rctx->fallback_req.nbytes = req->nbytes;
1535 rctx->fallback_req.src = req->src;
1536 rctx->fallback_req.result = req->result;
1537
1538 return crypto_ahash_finup(&rctx->fallback_req);
1539}
1540
1541static int sa_sha_import(struct ahash_request *req, const void *in)
1542{
1543 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1544 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1545 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1546
1547 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1548 rctx->fallback_req.base.flags = req->base.flags &
1549 CRYPTO_TFM_REQ_MAY_SLEEP;
1550
1551 return crypto_ahash_import(&rctx->fallback_req, in);
1552}
1553
1554static int sa_sha_export(struct ahash_request *req, void *out)
1555{
1556 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1557 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1558 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1559 struct ahash_request *subreq = &rctx->fallback_req;
1560
1561 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1562 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1563
1564 return crypto_ahash_export(subreq, out);
1565}
1566
1567static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1568{
1569 struct algo_data ad = { 0 };
1570 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1571
1572 sa_sha_cra_init_alg(tfm, "sha1");
1573
1574 ad.aalg_id = SA_AALG_ID_SHA1;
1575 ad.hash_size = SHA1_DIGEST_SIZE;
1576 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1577
1578 sa_sha_setup(ctx, &ad);
1579
1580 return 0;
1581}
1582
1583static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1584{
1585 struct algo_data ad = { 0 };
1586 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1587
1588 sa_sha_cra_init_alg(tfm, "sha256");
1589
1590 ad.aalg_id = SA_AALG_ID_SHA2_256;
1591 ad.hash_size = SHA256_DIGEST_SIZE;
1592 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1593
1594 sa_sha_setup(ctx, &ad);
1595
1596 return 0;
1597}
1598
1599static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1600{
1601 struct algo_data ad = { 0 };
1602 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1603
1604 sa_sha_cra_init_alg(tfm, "sha512");
1605
1606 ad.aalg_id = SA_AALG_ID_SHA2_512;
1607 ad.hash_size = SHA512_DIGEST_SIZE;
1608 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1609
1610 sa_sha_setup(ctx, &ad);
1611
1612 return 0;
1613}
1614
1615static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1616{
1617 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1618 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1619
1620 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1621 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1622 ctx->dec.sc_id, &ctx->dec.sc_phys);
1623
1624 if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1625 sa_free_ctx_info(&ctx->enc, data);
1626
1627 crypto_free_shash(ctx->shash);
1628 crypto_free_ahash(ctx->fallback.ahash);
1629}
1630
1631static void sa_aead_dma_in_callback(void *data)
1632{
1633 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1634 struct aead_request *req;
1635 struct crypto_aead *tfm;
1636 unsigned int start;
1637 unsigned int authsize;
1638 u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1639 size_t pl, ml;
1640 int i, sglen;
1641 int err = 0;
1642 u16 auth_len;
1643 u32 *mdptr;
1644 bool diff_dst;
1645 enum dma_data_direction dir_src;
1646
1647 req = container_of(rxd->req, struct aead_request, base);
1648 tfm = crypto_aead_reqtfm(req);
1649 start = req->assoclen + req->cryptlen;
1650 authsize = crypto_aead_authsize(tfm);
1651
1652 diff_dst = (req->src != req->dst) ? true : false;
1653 dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
1654
1655 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1656 for (i = 0; i < (authsize / 4); i++)
1657 mdptr[i + 4] = swab32(mdptr[i + 4]);
1658
1659 auth_len = req->assoclen + req->cryptlen;
1660 if (!rxd->enc)
1661 auth_len -= authsize;
1662
1663 sglen = sg_nents_for_len(rxd->src, auth_len);
1664 dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
1665 kfree(rxd->split_src_sg);
1666
1667 if (diff_dst) {
1668 sglen = sg_nents_for_len(rxd->dst, auth_len);
1669 dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
1670 kfree(rxd->split_dst_sg);
1671 }
1672
1673 if (rxd->enc) {
1674 scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1675 1);
1676 } else {
1677 start -= authsize;
1678 scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1679 0);
1680
1681 err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1682 }
1683
1684 kfree(rxd);
1685
1686 aead_request_complete(req, err);
1687}
1688
1689static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1690 const char *fallback)
1691{
1692 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1693 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1694 int ret;
1695
1696 memzero_explicit(ctx, sizeof(*ctx));
1697
1698 ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1699 if (IS_ERR(ctx->shash)) {
1700 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1701 return PTR_ERR(ctx->shash);
1702 }
1703
1704 ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1705 CRYPTO_ALG_NEED_FALLBACK);
1706
1707 if (IS_ERR(ctx->fallback.aead)) {
1708 dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1709 fallback);
1710 return PTR_ERR(ctx->fallback.aead);
1711 }
1712
1713 crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1714 crypto_aead_reqsize(ctx->fallback.aead));
1715
1716 ret = sa_init_ctx_info(&ctx->enc, data);
1717 if (ret)
1718 return ret;
1719
1720 ret = sa_init_ctx_info(&ctx->dec, data);
1721 if (ret) {
1722 sa_free_ctx_info(&ctx->enc, data);
1723 return ret;
1724 }
1725
1726 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1727 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1728 ctx->dec.sc_id, &ctx->dec.sc_phys);
1729
1730 return ret;
1731}
1732
1733static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1734{
1735 return sa_cra_init_aead(tfm, "sha1",
1736 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1737}
1738
1739static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1740{
1741 return sa_cra_init_aead(tfm, "sha256",
1742 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1743}
1744
1745static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1746{
1747 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1748 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1749
1750 crypto_free_shash(ctx->shash);
1751 crypto_free_aead(ctx->fallback.aead);
1752
1753 sa_free_ctx_info(&ctx->enc, data);
1754 sa_free_ctx_info(&ctx->dec, data);
1755}
1756
1757/* AEAD algorithm configuration interface function */
1758static int sa_aead_setkey(struct crypto_aead *authenc,
1759 const u8 *key, unsigned int keylen,
1760 struct algo_data *ad)
1761{
1762 struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1763 struct crypto_authenc_keys keys;
1764 int cmdl_len;
1765 struct sa_cmdl_cfg cfg;
1766 int key_idx;
1767
1768 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1769 return -EINVAL;
1770
1771 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1772 key_idx = (keys.enckeylen >> 3) - 2;
1773 if (key_idx >= 3)
1774 return -EINVAL;
1775
1776 ad->ctx = ctx;
1777 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1778 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1779 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1780 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1781 ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1782 ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1783 ad->inv_key = true;
1784 ad->keyed_mac = true;
1785 ad->ealg_id = SA_EALG_ID_AES_CBC;
1786 ad->prep_iopad = sa_prepare_iopads;
1787
1788 memset(&cfg, 0, sizeof(cfg));
1789 cfg.enc = true;
1790 cfg.aalg = ad->aalg_id;
1791 cfg.enc_eng_id = ad->enc_eng.eng_id;
1792 cfg.auth_eng_id = ad->auth_eng.eng_id;
1793 cfg.iv_size = crypto_aead_ivsize(authenc);
1794 cfg.akey = keys.authkey;
1795 cfg.akey_len = keys.authkeylen;
1796
1797 /* Setup Encryption Security Context & Command label template */
1798 if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
1799 keys.authkey, keys.authkeylen,
1800 ad, 1, &ctx->enc.epib[1]))
1801 return -EINVAL;
1802
1803 cmdl_len = sa_format_cmdl_gen(&cfg,
1804 (u8 *)ctx->enc.cmdl,
1805 &ctx->enc.cmdl_upd_info);
1806 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1807 return -EINVAL;
1808
1809 ctx->enc.cmdl_size = cmdl_len;
1810
1811 /* Setup Decryption Security Context & Command label template */
1812 if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
1813 keys.authkey, keys.authkeylen,
1814 ad, 0, &ctx->dec.epib[1]))
1815 return -EINVAL;
1816
1817 cfg.enc = false;
1818 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1819 &ctx->dec.cmdl_upd_info);
1820
1821 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1822 return -EINVAL;
1823
1824 ctx->dec.cmdl_size = cmdl_len;
1825
1826 crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1827 crypto_aead_set_flags(ctx->fallback.aead,
1828 crypto_aead_get_flags(authenc) &
1829 CRYPTO_TFM_REQ_MASK);
1830	/* Propagate the key to the fallback AEAD and report its result */
1831
1832	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1833}
1834
1835static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1836{
1837 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1838
1839 return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1840}
1841
1842static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1843 const u8 *key, unsigned int keylen)
1844{
1845 struct algo_data ad = { 0 };
1846
1847 ad.ealg_id = SA_EALG_ID_AES_CBC;
1848 ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1849 ad.hash_size = SHA1_DIGEST_SIZE;
1850 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1851
1852 return sa_aead_setkey(authenc, key, keylen, &ad);
1853}
1854
1855static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1856 const u8 *key, unsigned int keylen)
1857{
1858 struct algo_data ad = { 0 };
1859
1860 ad.ealg_id = SA_EALG_ID_AES_CBC;
1861 ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1862 ad.hash_size = SHA256_DIGEST_SIZE;
1863 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1864
1865 return sa_aead_setkey(authenc, key, keylen, &ad);
1866}
1867
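/*
 * Common AEAD request handler. Requests whose total authenticated size is
 * larger than SA_MAX_DATA_SZ, or falls inside the SA_UNSAFE_DATA_SZ_MIN..MAX
 * window the hardware cannot process reliably, are handed to the software
 * fallback AEAD; everything else is mapped onto a sa_req and run on SA2UL.
 */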
1868static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1869{
1870 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1871 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1872 struct sa_req sa_req = { 0 };
1873 size_t auth_size, enc_size;
1874
1875 enc_size = req->cryptlen;
1876 auth_size = req->assoclen + req->cryptlen;
1877
1878 if (!enc) {
1879 enc_size -= crypto_aead_authsize(tfm);
1880 auth_size -= crypto_aead_authsize(tfm);
1881 }
1882
1883 if (auth_size > SA_MAX_DATA_SZ ||
1884 (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1885 auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1886 struct aead_request *subreq = aead_request_ctx(req);
1887 int ret;
1888
1889 aead_request_set_tfm(subreq, ctx->fallback.aead);
1890 aead_request_set_callback(subreq, req->base.flags,
1891 req->base.complete, req->base.data);
1892 aead_request_set_crypt(subreq, req->src, req->dst,
1893 req->cryptlen, req->iv);
1894 aead_request_set_ad(subreq, req->assoclen);
1895
1896 ret = enc ? crypto_aead_encrypt(subreq) :
1897 crypto_aead_decrypt(subreq);
1898 return ret;
1899 }
1900
1901 sa_req.enc_offset = req->assoclen;
1902 sa_req.enc_size = enc_size;
1903 sa_req.auth_size = auth_size;
1904 sa_req.size = auth_size;
1905 sa_req.enc_iv = iv;
1906 sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1907 sa_req.enc = enc;
1908 sa_req.callback = sa_aead_dma_in_callback;
1909 sa_req.mdata_size = 52;
1910 sa_req.base = &req->base;
1911 sa_req.ctx = ctx;
1912 sa_req.src = req->src;
1913 sa_req.dst = req->dst;
1914
1915 return sa_run(&sa_req);
1916}
1917
1918/* AEAD algorithm encrypt interface function */
1919static int sa_aead_encrypt(struct aead_request *req)
1920{
1921 return sa_aead_run(req, req->iv, 1);
1922}
1923
1924/* AEAD algorithm decrypt interface function */
1925static int sa_aead_decrypt(struct aead_request *req)
1926{
1927 return sa_aead_run(req, req->iv, 0);
1928}
1929
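
/*
 * Algorithm templates exposed to the crypto API: AES/3DES skciphers,
 * SHA1/SHA256/SHA512 ahashes and the authenc() AEAD combinations.
 */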
1930static struct sa_alg_tmpl sa_algs[] = {
1931 {
1932 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1933 .alg.skcipher = {
1934 .base.cra_name = "cbc(aes)",
1935 .base.cra_driver_name = "cbc-aes-sa2ul",
1936 .base.cra_priority = 30000,
1937 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
1938 CRYPTO_ALG_KERN_DRIVER_ONLY |
1939 CRYPTO_ALG_ASYNC |
1940 CRYPTO_ALG_NEED_FALLBACK,
1941 .base.cra_blocksize = AES_BLOCK_SIZE,
1942 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
1943 .base.cra_module = THIS_MODULE,
1944 .init = sa_cipher_cra_init,
1945 .exit = sa_cipher_cra_exit,
1946 .min_keysize = AES_MIN_KEY_SIZE,
1947 .max_keysize = AES_MAX_KEY_SIZE,
1948 .ivsize = AES_BLOCK_SIZE,
1949 .setkey = sa_aes_cbc_setkey,
1950 .encrypt = sa_encrypt,
1951 .decrypt = sa_decrypt,
1952 }
1953 },
1954 {
1955 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1956 .alg.skcipher = {
1957 .base.cra_name = "ecb(aes)",
1958 .base.cra_driver_name = "ecb-aes-sa2ul",
1959 .base.cra_priority = 30000,
1960 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
1961 CRYPTO_ALG_KERN_DRIVER_ONLY |
1962 CRYPTO_ALG_ASYNC |
1963 CRYPTO_ALG_NEED_FALLBACK,
1964 .base.cra_blocksize = AES_BLOCK_SIZE,
1965 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
1966 .base.cra_module = THIS_MODULE,
1967 .init = sa_cipher_cra_init,
1968 .exit = sa_cipher_cra_exit,
1969 .min_keysize = AES_MIN_KEY_SIZE,
1970 .max_keysize = AES_MAX_KEY_SIZE,
1971 .setkey = sa_aes_ecb_setkey,
1972 .encrypt = sa_encrypt,
1973 .decrypt = sa_decrypt,
1974 }
1975 },
1976 {
1977 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1978 .alg.skcipher = {
1979 .base.cra_name = "cbc(des3_ede)",
1980 .base.cra_driver_name = "cbc-des3-sa2ul",
1981 .base.cra_priority = 30000,
1982 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
1983 CRYPTO_ALG_KERN_DRIVER_ONLY |
1984 CRYPTO_ALG_ASYNC |
1985 CRYPTO_ALG_NEED_FALLBACK,
1986 .base.cra_blocksize = DES_BLOCK_SIZE,
1987 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
1988 .base.cra_module = THIS_MODULE,
1989 .init = sa_cipher_cra_init,
1990 .exit = sa_cipher_cra_exit,
1991 .min_keysize = 3 * DES_KEY_SIZE,
1992 .max_keysize = 3 * DES_KEY_SIZE,
1993 .ivsize = DES_BLOCK_SIZE,
1994 .setkey = sa_3des_cbc_setkey,
1995 .encrypt = sa_encrypt,
1996 .decrypt = sa_decrypt,
1997 }
1998 },
1999 {
2000 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2001 .alg.skcipher = {
2002 .base.cra_name = "ecb(des3_ede)",
2003 .base.cra_driver_name = "ecb-des3-sa2ul",
2004 .base.cra_priority = 30000,
2005 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2006 CRYPTO_ALG_KERN_DRIVER_ONLY |
2007 CRYPTO_ALG_ASYNC |
2008 CRYPTO_ALG_NEED_FALLBACK,
2009 .base.cra_blocksize = DES_BLOCK_SIZE,
2010 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2011 .base.cra_module = THIS_MODULE,
2012 .init = sa_cipher_cra_init,
2013 .exit = sa_cipher_cra_exit,
2014 .min_keysize = 3 * DES_KEY_SIZE,
2015 .max_keysize = 3 * DES_KEY_SIZE,
2016 .setkey = sa_3des_ecb_setkey,
2017 .encrypt = sa_encrypt,
2018 .decrypt = sa_decrypt,
2019 }
2020 },
2021 {
2022 .type = CRYPTO_ALG_TYPE_AHASH,
2023 .alg.ahash = {
2024 .halg.base = {
2025 .cra_name = "sha1",
2026 .cra_driver_name = "sha1-sa2ul",
2027 .cra_priority = 400,
2028 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2029 CRYPTO_ALG_ASYNC |
2030 CRYPTO_ALG_KERN_DRIVER_ONLY |
2031 CRYPTO_ALG_NEED_FALLBACK,
2032 .cra_blocksize = SHA1_BLOCK_SIZE,
2033 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2034 .cra_module = THIS_MODULE,
2035 .cra_init = sa_sha1_cra_init,
2036 .cra_exit = sa_sha_cra_exit,
2037 },
2038 .halg.digestsize = SHA1_DIGEST_SIZE,
2039 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2040 sizeof(struct sha1_state),
2041 .init = sa_sha_init,
2042 .update = sa_sha_update,
2043 .final = sa_sha_final,
2044 .finup = sa_sha_finup,
2045 .digest = sa_sha_digest,
2046 .export = sa_sha_export,
2047 .import = sa_sha_import,
2048 },
2049 },
2050 {
2051 .type = CRYPTO_ALG_TYPE_AHASH,
2052 .alg.ahash = {
2053 .halg.base = {
2054 .cra_name = "sha256",
2055 .cra_driver_name = "sha256-sa2ul",
2056 .cra_priority = 400,
2057 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2058 CRYPTO_ALG_ASYNC |
2059 CRYPTO_ALG_KERN_DRIVER_ONLY |
2060 CRYPTO_ALG_NEED_FALLBACK,
2061 .cra_blocksize = SHA256_BLOCK_SIZE,
2062 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2063 .cra_module = THIS_MODULE,
2064 .cra_init = sa_sha256_cra_init,
2065 .cra_exit = sa_sha_cra_exit,
2066 },
2067 .halg.digestsize = SHA256_DIGEST_SIZE,
2068 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2069 sizeof(struct sha256_state),
2070 .init = sa_sha_init,
2071 .update = sa_sha_update,
2072 .final = sa_sha_final,
2073 .finup = sa_sha_finup,
2074 .digest = sa_sha_digest,
2075 .export = sa_sha_export,
2076 .import = sa_sha_import,
2077 },
2078 },
2079 {
2080 .type = CRYPTO_ALG_TYPE_AHASH,
2081 .alg.ahash = {
2082 .halg.base = {
2083 .cra_name = "sha512",
2084 .cra_driver_name = "sha512-sa2ul",
2085 .cra_priority = 400,
2086 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2087 CRYPTO_ALG_ASYNC |
2088 CRYPTO_ALG_KERN_DRIVER_ONLY |
2089 CRYPTO_ALG_NEED_FALLBACK,
2090 .cra_blocksize = SHA512_BLOCK_SIZE,
2091 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2092 .cra_module = THIS_MODULE,
2093 .cra_init = sa_sha512_cra_init,
2094 .cra_exit = sa_sha_cra_exit,
2095 },
2096 .halg.digestsize = SHA512_DIGEST_SIZE,
2097 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2098 sizeof(struct sha512_state),
2099 .init = sa_sha_init,
2100 .update = sa_sha_update,
2101 .final = sa_sha_final,
2102 .finup = sa_sha_finup,
2103 .digest = sa_sha_digest,
2104 .export = sa_sha_export,
2105 .import = sa_sha_import,
2106 },
2107 },
2108 {
2109 .type = CRYPTO_ALG_TYPE_AEAD,
2110 .alg.aead = {
2111 .base = {
2112 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2113 .cra_driver_name =
2114 "authenc(hmac(sha1),cbc(aes))-sa2ul",
2115 .cra_blocksize = AES_BLOCK_SIZE,
2116 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2117 CRYPTO_ALG_KERN_DRIVER_ONLY |
2118 CRYPTO_ALG_ASYNC |
2119 CRYPTO_ALG_NEED_FALLBACK,
2120 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2121 .cra_module = THIS_MODULE,
2122 .cra_priority = 3000,
2123 },
2124 .ivsize = AES_BLOCK_SIZE,
2125 .maxauthsize = SHA1_DIGEST_SIZE,
2126
2127 .init = sa_cra_init_aead_sha1,
2128 .exit = sa_exit_tfm_aead,
2129 .setkey = sa_aead_cbc_sha1_setkey,
2130 .setauthsize = sa_aead_setauthsize,
2131 .encrypt = sa_aead_encrypt,
2132 .decrypt = sa_aead_decrypt,
2133 },
2134 },
2135 {
2136 .type = CRYPTO_ALG_TYPE_AEAD,
2137 .alg.aead = {
2138 .base = {
2139 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2140 .cra_driver_name =
2141 "authenc(hmac(sha256),cbc(aes))-sa2ul",
2142 .cra_blocksize = AES_BLOCK_SIZE,
2143 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2144 CRYPTO_ALG_KERN_DRIVER_ONLY |
2145 CRYPTO_ALG_ASYNC |
2146 CRYPTO_ALG_NEED_FALLBACK,
2147 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2148 .cra_module = THIS_MODULE,
2149 .cra_alignmask = 0,
2150 .cra_priority = 3000,
2151 },
2152 .ivsize = AES_BLOCK_SIZE,
2153 .maxauthsize = SHA256_DIGEST_SIZE,
2154
2155 .init = sa_cra_init_aead_sha256,
2156 .exit = sa_exit_tfm_aead,
2157 .setkey = sa_aead_cbc_sha256_setkey,
2158 .setauthsize = sa_aead_setauthsize,
2159 .encrypt = sa_aead_encrypt,
2160 .decrypt = sa_aead_decrypt,
2161 },
2162 },
2163};
2164
2165/* Register the algorithms in crypto framework */
2166static void sa_register_algos(const struct device *dev)
2167{
2168 char *alg_name;
2169 u32 type;
2170 int i, err;
2171
2172 for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2173 type = sa_algs[i].type;
2174 if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2175 alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2176 err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2177 } else if (type == CRYPTO_ALG_TYPE_AHASH) {
2178 alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2179 err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2180 } else if (type == CRYPTO_ALG_TYPE_AEAD) {
2181 alg_name = sa_algs[i].alg.aead.base.cra_name;
2182 err = crypto_register_aead(&sa_algs[i].alg.aead);
2183 } else {
2184			dev_err(dev,
2185				"unsupported crypto algorithm (%d)\n",
2186				sa_algs[i].type);
2187 continue;
2188 }
2189
2190 if (err)
2191 dev_err(dev, "Failed to register '%s'\n", alg_name);
2192 else
2193 sa_algs[i].registered = true;
2194 }
2195}
2196
2197/* Unregister the algorithms in crypto framework */
2198static void sa_unregister_algos(const struct device *dev)
2199{
2200 u32 type;
2201 int i;
2202
2203 for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2204 type = sa_algs[i].type;
2205 if (!sa_algs[i].registered)
2206 continue;
2207 if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2208 crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2209 else if (type == CRYPTO_ALG_TYPE_AHASH)
2210 crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2211 else if (type == CRYPTO_ALG_TYPE_AEAD)
2212 crypto_unregister_aead(&sa_algs[i].alg.aead);
2213
2214 sa_algs[i].registered = false;
2215 }
2216}
2217
2218static int sa_init_mem(struct sa_crypto_data *dev_data)
2219{
2220 struct device *dev = &dev_data->pdev->dev;
2221 /* Setup dma pool for security context buffers */
2222 dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2223 SA_CTX_MAX_SZ, 64, 0);
2224 if (!dev_data->sc_pool) {
2225		dev_err(dev, "Failed to create dma pool\n");
2226 return -ENOMEM;
2227 }
2228
2229 return 0;
2230}
2231
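/*
 * Request the rx1, rx2 and tx DMA channels used to move data to and from
 * the SA2UL engine and apply a common 4-byte wide, burst-of-4 slave
 * configuration to each of them.
 */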
2232static int sa_dma_init(struct sa_crypto_data *dd)
2233{
2234 int ret;
2235 struct dma_slave_config cfg;
2236
2237 dd->dma_rx1 = NULL;
2238 dd->dma_tx = NULL;
2239 dd->dma_rx2 = NULL;
2240
2241 ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2242 if (ret)
2243 return ret;
2244
2245 dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2246 if (IS_ERR(dd->dma_rx1)) {
2247 if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
2248 dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
2249 return PTR_ERR(dd->dma_rx1);
2250 }
2251
2252 dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2253 if (IS_ERR(dd->dma_rx2)) {
2254 dma_release_channel(dd->dma_rx1);
2255 if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
2256 dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
2257 return PTR_ERR(dd->dma_rx2);
2258 }
2259
2260 dd->dma_tx = dma_request_chan(dd->dev, "tx");
2261 if (IS_ERR(dd->dma_tx)) {
2262		if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER)
2263 dev_err(dd->dev, "Unable to request tx DMA channel\n");
2264 ret = PTR_ERR(dd->dma_tx);
2265 goto err_dma_tx;
2266 }
2267
2268 memzero_explicit(&cfg, sizeof(cfg));
2269
2270 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2271 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2272 cfg.src_maxburst = 4;
2273 cfg.dst_maxburst = 4;
2274
2275 ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2276 if (ret) {
2277 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2278 ret);
2279		goto err_dma_config;
2280 }
2281
2282 ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2283 if (ret) {
2284 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2285 ret);
2286		goto err_dma_config;
2287 }
2288
2289 ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2290 if (ret) {
2291 dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2292 ret);
2293		goto err_dma_config;
2294 }
2295
2296 return 0;
2297
err_dma_config:
	dma_release_channel(dd->dma_tx);
2298err_dma_tx:
2299	dma_release_channel(dd->dma_rx1);
2300	dma_release_channel(dd->dma_rx2);
2301
2302 return ret;
2303}
2304
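/*
 * Make each child device created by of_platform_populate() a consumer of
 * the SA2UL parent so that probe ordering and runtime PM follow the parent.
 */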
2305static int sa_link_child(struct device *dev, void *data)
2306{
2307 struct device *parent = data;
2308
2309 device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2310
2311 return 0;
2312}
2313
2314static int sa_ul_probe(struct platform_device *pdev)
2315{
2316 struct device *dev = &pdev->dev;
2317 struct device_node *node = dev->of_node;
2318 struct resource *res;
2319	void __iomem *saul_base;
2320 struct sa_crypto_data *dev_data;
2321 u32 val;
2322 int ret;
2323
2324 dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2325 if (!dev_data)
2326 return -ENOMEM;
2327
2328 sa_k3_dev = dev;
2329 dev_data->dev = dev;
2330 dev_data->pdev = pdev;
2331 platform_set_drvdata(pdev, dev_data);
2332 dev_set_drvdata(sa_k3_dev, dev_data);
2333
2334 pm_runtime_enable(dev);
2335 ret = pm_runtime_get_sync(dev);
2336	if (ret < 0) {
2337		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
2338			ret);
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
2339		return ret;
2340	}
2341
2342	ret = sa_init_mem(dev_data);
	if (ret)
		goto disable_pm_runtime;

2343	ret = sa_dma_init(dev_data);
2344	if (ret)
2345		goto destroy_dma_pool;
2346
2347 spin_lock_init(&dev_data->scid_lock);
2348 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2349	saul_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(saul_base)) {
		ret = PTR_ERR(saul_base);
		goto release_dma;
	}
2350
2351 dev_data->base = saul_base;
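	/* Enable the crypto engines, context cache, CPPI ports and the TRNG */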
2352 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2353 SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2354 SA_EEC_TRNG_EN;
2355
2356 writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2357
2358 sa_register_algos(dev);
2359
2360 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2361 if (ret)
2362 goto release_dma;
2363
2364 device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
2365
2366 return 0;
2367
2368release_dma:
2369 sa_unregister_algos(&pdev->dev);
2370
2371 dma_release_channel(dev_data->dma_rx2);
2372 dma_release_channel(dev_data->dma_rx1);
2373 dma_release_channel(dev_data->dma_tx);
2374
destroy_dma_pool:
2375	dma_pool_destroy(dev_data->sc_pool);
2376
2377disable_pm_runtime:
2378 pm_runtime_put_sync(&pdev->dev);
2379 pm_runtime_disable(&pdev->dev);
2380
2381 return ret;
2382}
2383
2384static int sa_ul_remove(struct platform_device *pdev)
2385{
2386 struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2387
2388 sa_unregister_algos(&pdev->dev);
2389
2390 dma_release_channel(dev_data->dma_rx2);
2391 dma_release_channel(dev_data->dma_rx1);
2392 dma_release_channel(dev_data->dma_tx);
2393
2394 dma_pool_destroy(dev_data->sc_pool);
2395
2396 platform_set_drvdata(pdev, NULL);
2397
2398 pm_runtime_put_sync(&pdev->dev);
2399 pm_runtime_disable(&pdev->dev);
2400
2401 return 0;
2402}
2403
2404static const struct of_device_id of_match[] = {
2405 {.compatible = "ti,j721e-sa2ul",},
2406 {.compatible = "ti,am654-sa2ul",},
2407 {},
2408};
2409MODULE_DEVICE_TABLE(of, of_match);
2410
2411static struct platform_driver sa_ul_driver = {
2412 .probe = sa_ul_probe,
2413 .remove = sa_ul_remove,
2414 .driver = {
2415 .name = "saul-crypto",
2416 .of_match_table = of_match,
2417 },
2418};
2419module_platform_driver(sa_ul_driver);
2420MODULE_LICENSE("GPL v2");