/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

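/*
 * Initialize the transform record cache (TRC): put it in software reset,
 * clear all records and hash table entries, then set up the record free
 * chain and the record/hash table sizes.
 */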
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
        u32 val, htable_offset;
        int i;

        /* Enable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        val |= EIP197_TRC_ENABLE_0;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Clear all ECC errors */
        writel(0, priv->base + EIP197_TRC_ECCCTRL);

        /*
         * Make sure the cache memory is accessible by taking the record
         * cache into reset.
         */
        val = readl(priv->base + EIP197_TRC_PARAMS);
        val |= EIP197_TRC_PARAMS_SW_RESET;
        val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
        writel(val, priv->base + EIP197_TRC_PARAMS);

        /* Clear all records */
        for (i = 0; i < EIP197_CS_RC_MAX; i++) {
                u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

                writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
                       EIP197_CS_RC_PREV(EIP197_RC_NULL),
                       priv->base + offset);

                val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
                if (i == 0)
                        val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
                else if (i == EIP197_CS_RC_MAX - 1)
                        val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
                writel(val, priv->base + offset + sizeof(u32));
        }

        /* Clear the hash table entries */
        htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
        for (i = 0; i < 64; i++)
                writel(GENMASK(29, 0),
                       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

        /* Disable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Write head and tail pointers of the record free chain */
        val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
              EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
        writel(val, priv->base + EIP197_TRC_FREECHAIN);

        /* Configure the record cache #1 */
        val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
              EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
        writel(val, priv->base + EIP197_TRC_PARAMS2);

        /* Configure the record cache #2 */
        val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
              EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
              EIP197_TRC_PARAMS_HTABLE_SZ(2);
        writel(val, priv->base + EIP197_TRC_PARAMS);
}

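/*
 * Load one firmware image into an internal classification engine: hold
 * the engine in reset, enable program RAM access, copy the big-endian
 * image word by word, then release the reset.
 */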
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
                                  const struct firmware *fw, u32 ctrl,
                                  u32 prog_en)
{
        const u32 *data = (const u32 *)fw->data;
        u32 val;
        int i;

        /* Reset the engine to make its program memory accessible */
        writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
               EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
               EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
               priv->base + ctrl);

        /* Enable access to the program memory */
        writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);

        /* Write the firmware */
        for (i = 0; i < fw->size / sizeof(u32); i++)
                writel(be32_to_cpu(data[i]),
                       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

        /* Disable access to the program memory */
        writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);

        /* Release engine from reset */
        val = readl(priv->base + ctrl);
        val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
        writel(val, priv->base + ctrl);
}

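/*
 * Request and load the two EIP197 classification firmware images,
 * ifpp.bin (FW_IFPP) and ipue.bin (FW_IPUE), clearing the engine
 * scratchpad memory first.
 */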
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
        const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
        const struct firmware *fw[FW_NB];
        int i, j, ret = 0;
        u32 val;

        for (i = 0; i < FW_NB; i++) {
                ret = request_firmware(&fw[i], fw_name[i], priv->dev);
                if (ret) {
                        dev_err(priv->dev,
                                "Failed to request firmware %s (%d)\n",
                                fw_name[i], ret);
                        goto release_fw;
                }
        }

        /* Clear the scratchpad memory */
        val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
        val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
               EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
               EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
               EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
        writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);

        memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
               EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

        eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
                              EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

        eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
                              EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);

release_fw:
        for (j = 0; j < i; j++)
                release_firmware(fw[j]);

        return ret;
}

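/*
 * Program the command descriptor rings (CDRs): base addresses, descriptor
 * size/offset, fetch configuration and DMA cache attributes, then clear
 * any pending ring interrupts.
 */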
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 hdw, cd_size_rnd, val;
        int i;

        hdw = readl(priv->base + EIP197_HIA_OPTIONS);
        hdw &= GENMASK(27, 25);
        hdw >>= 25;

        cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].cdr.base_dma),
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].cdr.base_dma),
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
                       priv->config.cd_size,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
                writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
                       (EIP197_FETCH_COUNT * priv->config.cd_offset),
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                writel(val,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(5, 0),
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
        }

        return 0;
}

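/*
 * Program the result descriptor rings (RDRs), mirroring the CDR setup,
 * and additionally enable the per-ring RDR interrupt.
 */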
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 hdw, rd_size_rnd, val;
        int i;

        hdw = readl(priv->base + EIP197_HIA_OPTIONS);
        hdw &= GENMASK(27, 25);
        hdw >>= 25;

        rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].rdr.base_dma),
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].rdr.base_dma),
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
                       priv->config.rd_size,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);

                writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
                       (EIP197_FETCH_COUNT * priv->config.rd_offset),
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
                writel(val,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(7, 0),
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);

                /* enable ring interrupt */
                val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
                val |= EIP197_RDR_IRQ(i);
                writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
        }

        return 0;
}

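/*
 * Bring the engine up: configure byte swapping and cache attributes,
 * reset and configure the data fetch (DFE) and data store (DSE) engines,
 * select the enabled processing engine functions, prepare all command
 * and result descriptor rings, initialize the record cache and load the
 * firmwares.
 */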
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
        u32 version, val;
        int i, ret;

        /* Determine endianness and configure byte swap */
        version = readl(priv->base + EIP197_HIA_VERSION);
        val = readl(priv->base + EIP197_HIA_MST_CTRL);

        if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
                val |= EIP197_MST_CTRL_BYTE_SWAP;
        else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
                val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

        writel(val, priv->base + EIP197_HIA_MST_CTRL);

        /* Configure wr/rd cache values */
        writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
               EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
               priv->base + EIP197_MST_CTRL);

        /* Interrupts reset */

        /* Disable all global interrupts */
        writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);

        /* Clear any pending interrupt */
        writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);

        /* Data Fetch Engine configuration */

        /* Reset all DFE threads */
        writel(EIP197_DxE_THR_CTRL_RESET_PE,
               priv->base + EIP197_HIA_DFE_THR_CTRL);

        /* Reset HIA input interface arbiter */
        writel(EIP197_HIA_RA_PE_CTRL_RESET,
               priv->base + EIP197_HIA_RA_PE_CTRL);

        /* DMA transfer size to use */
        val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
        val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
        val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
        val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
        val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
        writel(val, priv->base + EIP197_HIA_DFE_CFG);

        /* Take the DFE threads out of reset */
        writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);

        /* Configure the processing engine thresholds */
        writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
               priv->base + EIP197_PE_IN_DBUF_THRES);
        writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
               priv->base + EIP197_PE_IN_TBUF_THRES);

        /* enable HIA input interface arbiter and rings */
        writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
               priv->base + EIP197_HIA_RA_PE_CTRL);

        /* Data Store Engine configuration */

        /* Reset all DSE threads */
        writel(EIP197_DxE_THR_CTRL_RESET_PE,
               priv->base + EIP197_HIA_DSE_THR_CTRL);

        /* Wait for all DSE threads to complete */
        while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
                GENMASK(15, 12)) != GENMASK(15, 12))
                ;

        /* DMA transfer size to use */
        val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
        val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
        val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
        val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
        val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
        writel(val, priv->base + EIP197_HIA_DSE_CFG);

        /* Take the DSE threads out of reset */
        writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);

        /* Configure the processing engine thresholds */
        writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
               priv->base + EIP197_PE_OUT_DBUF_THRES);

        /* Processing Engine configuration */

        /* H/W capabilities selection */
        val = EIP197_FUNCTION_RSVD;
        val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
        val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
        val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
        val |= EIP197_ALG_SHA2;
        writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);

        /* Command Descriptor Rings prepare */
        for (i = 0; i < priv->config.rings; i++) {
                /* Clear interrupts for this ring */
                writel(GENMASK(31, 0),
                       priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));

                /* Disable external triggering */
                writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);

                writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
        }

        /* Result Descriptor Rings prepare */
        for (i = 0; i < priv->config.rings; i++) {
                /* Disable external triggering */
                writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);

                /* Ring size */
                writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
        }

        /* Enable command descriptor rings */
        writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
               priv->base + EIP197_HIA_DFE_THR_CTRL);

        /* Enable result descriptor rings */
        writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
               priv->base + EIP197_HIA_DSE_THR_CTRL);

        /* Clear any HIA interrupt */
        writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);

        eip197_trc_cache_init(priv);

        ret = eip197_load_firmwares(priv);
        if (ret)
                return ret;

        safexcel_hw_setup_cdesc_rings(priv);
        safexcel_hw_setup_rdesc_rings(priv);

        return 0;
}

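/*
 * Pull requests off the ring's crypto queue and turn them into command
 * and result descriptors, batching up to EIP197_MAX_BATCH_SZ requests
 * before telling the CDR/RDR how many descriptors were prepared.
 */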
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
        struct crypto_async_request *req, *backlog;
        struct safexcel_context *ctx;
        struct safexcel_request *request;
        int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

        do {
                spin_lock_bh(&priv->ring[ring].queue_lock);
                req = crypto_dequeue_request(&priv->ring[ring].queue);
                backlog = crypto_get_backlog(&priv->ring[ring].queue);
                spin_unlock_bh(&priv->ring[ring].queue_lock);

                if (!req)
                        goto finalize;

                request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
                if (!request) {
                        spin_lock_bh(&priv->ring[ring].queue_lock);
                        crypto_enqueue_request(&priv->ring[ring].queue, req);
                        spin_unlock_bh(&priv->ring[ring].queue_lock);

                        priv->ring[ring].need_dequeue = true;
                        goto finalize;
                }

                ctx = crypto_tfm_ctx(req->tfm);
                ret = ctx->send(req, ring, request, &commands, &results);
                if (ret) {
                        kfree(request);
                        req->complete(req, ret);
                        priv->ring[ring].need_dequeue = true;
                        goto finalize;
                }

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                spin_lock_bh(&priv->ring[ring].egress_lock);
                list_add_tail(&request->list, &priv->ring[ring].list);
                spin_unlock_bh(&priv->ring[ring].egress_lock);

                cdesc += commands;
                rdesc += results;
        } while (nreq++ < EIP197_MAX_BATCH_SZ);

finalize:
        if (nreq == EIP197_MAX_BATCH_SZ)
                priv->ring[ring].need_dequeue = true;
        else if (!nreq)
                return;

        spin_lock_bh(&priv->ring[ring].lock);

        /* Configure when we want an interrupt */
        writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
               EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
               priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);

        /* let the RDR know we have pending descriptors */
        writel((rdesc * priv->config.rd_offset) << 2,
               priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);

        /* let the CDR know we have pending descriptors */
        writel((cdesc * priv->config.cd_offset) << 2,
               priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);

        spin_unlock_bh(&priv->ring[ring].lock);
}

void safexcel_free_context(struct safexcel_crypto_priv *priv,
                           struct crypto_async_request *req,
                           int result_sz)
{
        struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);

        if (ctx->result_dma)
                dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
                                 DMA_FROM_DEVICE);

        if (ctx->cache) {
                dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
                                 DMA_TO_DEVICE);
                kfree(ctx->cache);
                ctx->cache = NULL;
                ctx->cache_sz = 0;
        }
}

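/*
 * Walk the command descriptor ring read pointer forward until the last
 * segment of the current request has been acknowledged.
 */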
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
        struct safexcel_command_desc *cdesc;

        /* Acknowledge the command descriptors */
        do {
                cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
                if (IS_ERR(cdesc)) {
                        dev_err(priv->dev,
                                "Could not retrieve the command descriptor\n");
                        return;
                }
        } while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_inv_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

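/*
 * Queue a special EIP197_TYPE_EXTENDED command that invalidates the
 * context record cached by the engine, rolling the command descriptor
 * back if no result descriptor can be allocated.
 */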
int safexcel_invalidate_cache(struct crypto_async_request *async,
                              struct safexcel_context *ctx,
                              struct safexcel_crypto_priv *priv,
                              dma_addr_t ctxr_dma, int ring,
                              struct safexcel_request *request)
{
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        int ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Prepare command descriptor */
        cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
        if (IS_ERR(cdesc)) {
                ret = PTR_ERR(cdesc);
                goto unlock;
        }

        cdesc->control_data.type = EIP197_TYPE_EXTENDED;
        cdesc->control_data.options = 0;
        cdesc->control_data.refresh = 0;
        cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

        /* Prepare result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        request->req = async;
        goto unlock;

cdesc_rollback:
        safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

unlock:
        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
}

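/*
 * Drain processed results from a ring: pop each completed request off
 * the egress list, let the context handle its result descriptors,
 * acknowledge them in the PROC_COUNT register and complete the request.
 */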
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
                                                     int ring)
{
        struct safexcel_request *sreq;
        struct safexcel_context *ctx;
        int ret, i, nreq, ndesc = 0;
        bool should_complete;

        nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
        nreq >>= 24;
        nreq &= GENMASK(6, 0);
        if (!nreq)
                return;

        for (i = 0; i < nreq; i++) {
                spin_lock_bh(&priv->ring[ring].egress_lock);
                sreq = list_first_entry(&priv->ring[ring].list,
                                        struct safexcel_request, list);
                list_del(&sreq->list);
                spin_unlock_bh(&priv->ring[ring].egress_lock);

                ctx = crypto_tfm_ctx(sreq->req->tfm);
                ndesc = ctx->handle_result(priv, ring, sreq->req,
                                           &should_complete, &ret);
                if (ndesc < 0) {
                        dev_err(priv->dev, "failed to handle result (%d)\n", ndesc);
                        return;
                }

                writel(EIP197_xDR_PROC_xD_PKT(1) |
                       EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
                       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);

                if (should_complete) {
                        local_bh_disable();
                        sreq->req->complete(sreq->req, ret);
                        local_bh_enable();
                }

                kfree(sreq);
        }
}

static void safexcel_handle_result_work(struct work_struct *work)
{
        struct safexcel_work_data *data =
                container_of(work, struct safexcel_work_data, work);
        struct safexcel_crypto_priv *priv = data->priv;

        safexcel_handle_result_descriptor(priv, data->ring);

        if (priv->ring[data->ring].need_dequeue) {
                priv->ring[data->ring].need_dequeue = false;
                safexcel_dequeue(data->priv, data->ring);
        }
}

struct safexcel_ring_irq_data {
        struct safexcel_crypto_priv *priv;
        int ring;
};

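/*
 * Per-ring interrupt handler: on an RDR threshold interrupt, defer
 * result processing to the ring's workqueue; a ring error is fatal and
 * only reported. Both the ring status and the AIC are acknowledged.
 */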
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
        struct safexcel_ring_irq_data *irq_data = data;
        struct safexcel_crypto_priv *priv = irq_data->priv;
        int ring = irq_data->ring;
        u32 status, stat;

        status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
        if (!status)
                return IRQ_NONE;

        /* RDR interrupts */
        if (status & EIP197_RDR_IRQ(ring)) {
                stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);

                if (unlikely(stat & EIP197_xDR_ERR)) {
                        /*
                         * Fatal error, the RDR is unusable and must be
                         * reinitialized. This should not happen under
                         * normal circumstances.
                         */
                        dev_err(priv->dev, "RDR: fatal error.\n");
                } else if (likely(stat & EIP197_xDR_THRESH)) {
                        queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
                }

                /* ACK the interrupts */
                writel(stat & 0xff,
                       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
        }

        /* ACK the interrupts */
        writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));

        return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
                                     irq_handler_t handler,
                                     struct safexcel_ring_irq_data *ring_irq_priv)
{
        int ret, irq = platform_get_irq_byname(pdev, name);

        if (irq < 0) {
                dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
                return irq;
        }

        ret = devm_request_irq(&pdev->dev, irq, handler, 0,
                               dev_name(&pdev->dev), ring_irq_priv);
        if (ret) {
                dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
                return ret;
        }

        return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
        &safexcel_alg_ecb_aes,
        &safexcel_alg_cbc_aes,
        &safexcel_alg_sha1,
        &safexcel_alg_sha224,
        &safexcel_alg_sha256,
        &safexcel_alg_hmac_sha1,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
        int i, j, ret = 0;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                safexcel_algs[i]->priv = priv;

                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
                else
                        ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

                if (ret)
                        goto fail;
        }

        return 0;

fail:
        /* Unwind only the algorithms registered so far */
        for (j = 0; j < i; j++) {
                if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
                else
                        crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
        }

        return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
                else
                        crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
        }
}

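/*
 * Read the HIA options register to derive the number of usable rings
 * (bounded by the max_rings module parameter) and the command/result
 * descriptor sizes and offsets, aligned to the host data width.
 */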
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
        u32 val, mask;

        val = readl(priv->base + EIP197_HIA_OPTIONS);
        val = (val & GENMASK(27, 25)) >> 25;
        mask = BIT(val) - 1;

        val = readl(priv->base + EIP197_HIA_OPTIONS);
        priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

        priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
        priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

        priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
        priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

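/*
 * Probe: map the engine registers, enable the optional clock, set the
 * DMA mask, create the context record DMA pool, then set up per-ring
 * descriptors, IRQs, workqueues and queues before initializing the
 * hardware and registering the supported algorithms.
 */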
static int safexcel_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct safexcel_crypto_priv *priv;
        u64 dma_mask;
        int i, ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->base)) {
                dev_err(dev, "failed to get resource\n");
                return PTR_ERR(priv->base);
        }

        priv->clk = of_clk_get(dev->of_node, 0);
        if (!IS_ERR(priv->clk)) {
                ret = clk_prepare_enable(priv->clk);
                if (ret) {
                        dev_err(dev, "unable to enable clk (%d)\n", ret);
                        return ret;
                }
        } else {
                /* The clock isn't mandatory */
                if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
        }

        if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask))
                dma_mask = DMA_BIT_MASK(64);
        ret = dma_set_mask_and_coherent(dev, dma_mask);
        if (ret)
                goto err_clk;

        priv->context_pool = dmam_pool_create("safexcel-context", dev,
                                              sizeof(struct safexcel_context_record),
                                              1, 0);
        if (!priv->context_pool) {
                ret = -ENOMEM;
                goto err_clk;
        }

        safexcel_configure(priv);

        for (i = 0; i < priv->config.rings; i++) {
                char irq_name[6] = {0}; /* "ringX\0" */
                char wq_name[9] = {0}; /* "wq_ringX\0" */
                int irq;
                struct safexcel_ring_irq_data *ring_irq;

                ret = safexcel_init_ring_descriptors(priv,
                                                     &priv->ring[i].cdr,
                                                     &priv->ring[i].rdr);
                if (ret)
                        goto err_clk;

                ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
                if (!ring_irq) {
                        ret = -ENOMEM;
                        goto err_clk;
                }

                ring_irq->priv = priv;
                ring_irq->ring = i;

                snprintf(irq_name, 6, "ring%d", i);
                irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
                                                ring_irq);
                if (irq < 0)
                        goto err_clk;

                priv->ring[i].work_data.priv = priv;
                priv->ring[i].work_data.ring = i;
                INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);

                snprintf(wq_name, 9, "wq_ring%d", i);
                priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
                if (!priv->ring[i].workqueue) {
                        ret = -ENOMEM;
                        goto err_clk;
                }

                crypto_init_queue(&priv->ring[i].queue,
                                  EIP197_DEFAULT_RING_SIZE);

                INIT_LIST_HEAD(&priv->ring[i].list);
                spin_lock_init(&priv->ring[i].lock);
                spin_lock_init(&priv->ring[i].egress_lock);
                spin_lock_init(&priv->ring[i].queue_lock);
        }

        platform_set_drvdata(pdev, priv);
        atomic_set(&priv->ring_used, 0);

        ret = safexcel_hw_init(priv);
        if (ret) {
                dev_err(dev, "EIP h/w init failed (%d)\n", ret);
                goto err_clk;
        }

        ret = safexcel_register_algorithms(priv);
        if (ret) {
                dev_err(dev, "Failed to register algorithms (%d)\n", ret);
                goto err_clk;
        }

        return 0;

err_clk:
        clk_disable_unprepare(priv->clk);
        return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
        struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
        int i;

        safexcel_unregister_algorithms(priv);
        clk_disable_unprepare(priv->clk);

        for (i = 0; i < priv->config.rings; i++)
                destroy_workqueue(priv->ring[i].workqueue);

        return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
        { .compatible = "inside-secure,safexcel-eip197" },
        {},
};

static struct platform_driver crypto_safexcel = {
        .probe = safexcel_probe,
        .remove = safexcel_remove,
        .driver = {
                .name = "crypto-safexcel",
                .of_match_table = safexcel_of_match_table,
        },
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");