1 | /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) |
2 | * | |
3 | * Copyright 2008-2016 Freescale Semiconductor Inc. | |
4 | * Copyright 2016 NXP | |
5 | * | |
6 | */ | |
7 | ||
8 | #ifndef __RTA_SEC_RUN_TIME_ASM_H__ | |
9 | #define __RTA_SEC_RUN_TIME_ASM_H__ | |
10 | ||
11 | #include "hw/desc.h" | |
12 | ||
13 | /* hw/compat.h is not delivered in kernel */ | |
14 | #ifndef __KERNEL__ | |
15 | #include "hw/compat.h" | |
16 | #endif | |
17 | ||
18 | /** | |
19 | * enum rta_sec_era - SEC HW block revisions supported by the RTA library | |
20 | * @RTA_SEC_ERA_1: SEC Era 1 | |
21 | * @RTA_SEC_ERA_2: SEC Era 2 | |
22 | * @RTA_SEC_ERA_3: SEC Era 3 | |
23 | * @RTA_SEC_ERA_4: SEC Era 4 | |
24 | * @RTA_SEC_ERA_5: SEC Era 5 | |
25 | * @RTA_SEC_ERA_6: SEC Era 6 | |
26 | * @RTA_SEC_ERA_7: SEC Era 7 | |
27 | * @RTA_SEC_ERA_8: SEC Era 8 | |
28 | * @MAX_SEC_ERA: maximum SEC HW block revision supported by RTA library | |
29 | */ | |
30 | enum rta_sec_era { | |
31 | RTA_SEC_ERA_1, | |
32 | RTA_SEC_ERA_2, | |
33 | RTA_SEC_ERA_3, | |
34 | RTA_SEC_ERA_4, | |
35 | RTA_SEC_ERA_5, | |
36 | RTA_SEC_ERA_6, | |
37 | RTA_SEC_ERA_7, | |
38 | RTA_SEC_ERA_8, | |
39 | MAX_SEC_ERA = RTA_SEC_ERA_8 | |
40 | }; | |
41 | ||
/**
 * DEFAULT_SEC_ERA - the default value for the SEC era in case the user provides
 * an unsupported value.
 */
#define DEFAULT_SEC_ERA	MAX_SEC_ERA

/**
 * USER_SEC_ERA - translates the SEC Era from internal to user representation.
 * @sec_era: SEC Era in internal (library) representation
 */
#define USER_SEC_ERA(sec_era)	((sec_era) + 1)

/**
 * INTL_SEC_ERA - translates the SEC Era from user representation to internal.
 * @sec_era: SEC Era in user representation
 */
#define INTL_SEC_ERA(sec_era)	((sec_era) - 1)
59 | ||
60 | /** | |
61 | * enum rta_jump_type - Types of action taken by JUMP command | |
62 | * @LOCAL_JUMP: conditional jump to an offset within the descriptor buffer | |
63 | * @FAR_JUMP: conditional jump to a location outside the descriptor buffer, | |
64 | * indicated by the POINTER field after the JUMP command. | |
65 | * @HALT: conditional halt - stop the execution of the current descriptor and | |
66 | * writes PKHA / Math condition bits as status / error code. | |
67 | * @HALT_STATUS: conditional halt with user-specified status - stop the | |
68 | * execution of the current descriptor and writes the value of | |
69 | * "LOCAL OFFSET" JUMP field as status / error code. | |
70 | * @GOSUB: conditional subroutine call - similar to @LOCAL_JUMP, but also saves | |
71 | * return address in the Return Address register; subroutine calls | |
72 | * cannot be nested. | |
73 | * @RETURN: conditional subroutine return - similar to @LOCAL_JUMP, but the | |
74 | * offset is taken from the Return Address register. | |
75 | * @LOCAL_JUMP_INC: similar to @LOCAL_JUMP, but increment the register specified | |
76 | * in "SRC_DST" JUMP field before evaluating the jump | |
77 | * condition. | |
78 | * @LOCAL_JUMP_DEC: similar to @LOCAL_JUMP, but decrement the register specified | |
79 | * in "SRC_DST" JUMP field before evaluating the jump | |
80 | * condition. | |
81 | */ | |
82 | enum rta_jump_type { | |
83 | LOCAL_JUMP, | |
84 | FAR_JUMP, | |
85 | HALT, | |
86 | HALT_STATUS, | |
87 | GOSUB, | |
88 | RETURN, | |
89 | LOCAL_JUMP_INC, | |
90 | LOCAL_JUMP_DEC | |
91 | }; | |
92 | ||
93 | /** | |
94 | * enum rta_jump_cond - How test conditions are evaluated by JUMP command | |
95 | * @ALL_TRUE: perform action if ALL selected conditions are true | |
96 | * @ALL_FALSE: perform action if ALL selected conditions are false | |
97 | * @ANY_TRUE: perform action if ANY of the selected conditions is true | |
98 | * @ANY_FALSE: perform action if ANY of the selected conditions is false | |
99 | */ | |
100 | enum rta_jump_cond { | |
101 | ALL_TRUE, | |
102 | ALL_FALSE, | |
103 | ANY_TRUE, | |
104 | ANY_FALSE | |
105 | }; | |
106 | ||
107 | /** | |
108 | * enum rta_share_type - Types of sharing for JOB_HDR and SHR_HDR commands | |
109 | * @SHR_NEVER: nothing is shared; descriptors can execute in parallel (i.e. no | |
110 | * dependencies are allowed between them). | |
111 | * @SHR_WAIT: shared descriptor and keys are shared once the descriptor sets | |
112 | * "OK to share" in DECO Control Register (DCTRL). | |
113 | * @SHR_SERIAL: shared descriptor and keys are shared once the descriptor has | |
114 | * completed. | |
115 | * @SHR_ALWAYS: shared descriptor is shared anytime after the descriptor is | |
116 | * loaded. | |
117 | * @SHR_DEFER: valid only for JOB_HDR; sharing type is the one specified | |
118 | * in the shared descriptor associated with the job descriptor. | |
119 | */ | |
120 | enum rta_share_type { | |
121 | SHR_NEVER, | |
122 | SHR_WAIT, | |
123 | SHR_SERIAL, | |
124 | SHR_ALWAYS, | |
125 | SHR_DEFER | |
126 | }; | |
127 | ||
128 | /** | |
129 | * enum rta_data_type - Indicates how is the data provided and how to include it | |
130 | * in the descriptor. | |
131 | * @RTA_DATA_PTR: Data is in memory and accessed by reference; data address is a | |
132 | * physical (bus) address. | |
133 | * @RTA_DATA_IMM: Data is inlined in descriptor and accessed as immediate data; | |
134 | * data address is a virtual address. | |
135 | * @RTA_DATA_IMM_DMA: (AIOP only) Data is inlined in descriptor and accessed as | |
136 | * immediate data; data address is a physical (bus) address | |
137 | * in external memory and CDMA is programmed to transfer the | |
138 | * data into descriptor buffer being built in Workspace Area. | |
139 | */ | |
140 | enum rta_data_type { | |
141 | RTA_DATA_PTR = 1, | |
142 | RTA_DATA_IMM, | |
143 | RTA_DATA_IMM_DMA | |
144 | }; | |
145 | ||
146 | /* Registers definitions */ | |
147 | enum rta_regs { | |
148 | /* CCB Registers */ | |
149 | CONTEXT1 = 1, | |
150 | CONTEXT2, | |
151 | KEY1, | |
152 | KEY2, | |
153 | KEY1SZ, | |
154 | KEY2SZ, | |
155 | ICV1SZ, | |
156 | ICV2SZ, | |
157 | DATA1SZ, | |
158 | DATA2SZ, | |
159 | ALTDS1, | |
160 | IV1SZ, | |
161 | AAD1SZ, | |
162 | MODE1, | |
163 | MODE2, | |
164 | CCTRL, | |
165 | DCTRL, | |
166 | ICTRL, | |
167 | CLRW, | |
168 | CSTAT, | |
169 | IFIFO, | |
170 | NFIFO, | |
171 | OFIFO, | |
172 | PKASZ, | |
173 | PKBSZ, | |
174 | PKNSZ, | |
175 | PKESZ, | |
176 | /* DECO Registers */ | |
177 | MATH0, | |
178 | MATH1, | |
179 | MATH2, | |
180 | MATH3, | |
181 | DESCBUF, | |
182 | JOBDESCBUF, | |
183 | SHAREDESCBUF, | |
184 | DPOVRD, | |
185 | DJQDA, | |
186 | DSTAT, | |
187 | DPID, | |
188 | DJQCTRL, | |
189 | ALTSOURCE, | |
190 | SEQINSZ, | |
191 | SEQOUTSZ, | |
192 | VSEQINSZ, | |
193 | VSEQOUTSZ, | |
194 | /* PKHA Registers */ | |
195 | PKA, | |
196 | PKN, | |
197 | PKA0, | |
198 | PKA1, | |
199 | PKA2, | |
200 | PKA3, | |
201 | PKB, | |
202 | PKB0, | |
203 | PKB1, | |
204 | PKB2, | |
205 | PKB3, | |
206 | PKE, | |
207 | /* Pseudo registers */ | |
208 | AB1, | |
209 | AB2, | |
210 | ABD, | |
211 | IFIFOABD, | |
212 | IFIFOAB1, | |
213 | IFIFOAB2, | |
214 | AFHA_SBOX, | |
215 | MDHA_SPLIT_KEY, | |
216 | JOBSRC, | |
217 | ZERO, | |
218 | ONE, | |
219 | AAD1, | |
220 | IV1, | |
221 | IV2, | |
222 | MSG1, | |
223 | MSG2, | |
224 | MSG, | |
225 | MSG_CKSUM, | |
226 | MSGOUTSNOOP, | |
227 | MSGINSNOOP, | |
228 | ICV1, | |
229 | ICV2, | |
230 | SKIP, | |
231 | NONE, | |
232 | RNGOFIFO, | |
233 | RNG, | |
234 | IDFNS, | |
235 | ODFNS, | |
236 | NFIFOSZ, | |
237 | SZ, | |
238 | PAD, | |
239 | SAD1, | |
240 | AAD2, | |
241 | BIT_DATA, | |
242 | NFIFO_SZL, | |
243 | NFIFO_SZM, | |
244 | NFIFO_L, | |
245 | NFIFO_M, | |
246 | SZL, | |
247 | SZM, | |
248 | JOBDESCBUF_EFF, | |
249 | SHAREDESCBUF_EFF, | |
250 | METADATA, | |
251 | GTR, | |
252 | STR, | |
253 | OFIFO_SYNC, | |
254 | MSGOUTSNOOP_ALT | |
255 | }; | |
256 | ||
257 | /* Command flags */ | |
258 | #define FLUSH1 BIT(0) | |
259 | #define LAST1 BIT(1) | |
260 | #define LAST2 BIT(2) | |
261 | #define IMMED BIT(3) | |
262 | #define SGF BIT(4) | |
263 | #define VLF BIT(5) | |
264 | #define EXT BIT(6) | |
265 | #define CONT BIT(7) | |
266 | #define SEQ BIT(8) | |
267 | #define AIDF BIT(9) | |
268 | #define FLUSH2 BIT(10) | |
269 | #define CLASS1 BIT(11) | |
270 | #define CLASS2 BIT(12) | |
271 | #define BOTH BIT(13) | |
272 | ||
273 | /** | |
274 | * DCOPY - (AIOP only) command param is pointer to external memory | |
275 | * | |
276 | * CDMA must be used to transfer the key via DMA into Workspace Area. | |
277 | * Valid only in combination with IMMED flag. | |
278 | */ | |
279 | #define DCOPY BIT(30) | |
280 | ||
281 | #define COPY BIT(31) /* command param is pointer (not immediate) | |
282 | * valid only in combination when IMMED | |
283 | */ | |
284 | ||
285 | #define __COPY_MASK (COPY | DCOPY) | |
286 | ||
287 | /* SEQ IN/OUT PTR Command specific flags */ | |
288 | #define RBS BIT(16) | |
289 | #define INL BIT(17) | |
290 | #define PRE BIT(18) | |
291 | #define RTO BIT(19) | |
292 | #define RJD BIT(20) | |
293 | #define SOP BIT(21) | |
294 | #define RST BIT(22) | |
295 | #define EWS BIT(23) | |
296 | ||
297 | #define ENC BIT(14) /* Encrypted Key */ | |
298 | #define EKT BIT(15) /* AES CCM Encryption (default is | |
299 | * AES ECB Encryption) | |
300 | */ | |
301 | #define TK BIT(16) /* Trusted Descriptor Key (default is | |
302 | * Job Descriptor Key) | |
303 | */ | |
304 | #define NWB BIT(17) /* No Write Back Key */ | |
305 | #define PTS BIT(18) /* Plaintext Store */ | |
306 | ||
307 | /* HEADER Command specific flags */ | |
308 | #define RIF BIT(16) | |
309 | #define DNR BIT(17) | |
310 | #define CIF BIT(18) | |
311 | #define PD BIT(19) | |
312 | #define RSMS BIT(20) | |
313 | #define TD BIT(21) | |
314 | #define MTD BIT(22) | |
315 | #define REO BIT(23) | |
316 | #define SHR BIT(24) | |
317 | #define SC BIT(25) | |
318 | /* Extended HEADER specific flags */ | |
319 | #define DSV BIT(7) | |
320 | #define DSEL_MASK 0x00000007 /* DECO Select */ | |
321 | #define FTD BIT(8) | |
322 | ||
323 | /* JUMP Command specific flags */ | |
324 | #define NIFP BIT(20) | |
325 | #define NIP BIT(21) | |
326 | #define NOP BIT(22) | |
327 | #define NCP BIT(23) | |
328 | #define CALM BIT(24) | |
329 | ||
330 | #define MATH_Z BIT(25) | |
331 | #define MATH_N BIT(26) | |
332 | #define MATH_NV BIT(27) | |
333 | #define MATH_C BIT(28) | |
334 | #define PK_0 BIT(29) | |
335 | #define PK_GCD_1 BIT(30) | |
336 | #define PK_PRIME BIT(31) | |
337 | #define SELF BIT(0) | |
338 | #define SHRD BIT(1) | |
339 | #define JQP BIT(2) | |
340 | ||
341 | /* NFIFOADD specific flags */ | |
342 | #define PAD_ZERO BIT(16) | |
343 | #define PAD_NONZERO BIT(17) | |
344 | #define PAD_INCREMENT BIT(18) | |
345 | #define PAD_RANDOM BIT(19) | |
346 | #define PAD_ZERO_N1 BIT(20) | |
347 | #define PAD_NONZERO_0 BIT(21) | |
348 | #define PAD_N1 BIT(23) | |
349 | #define PAD_NONZERO_N BIT(24) | |
350 | #define OC BIT(25) | |
351 | #define BM BIT(26) | |
352 | #define PR BIT(27) | |
353 | #define PS BIT(28) | |
354 | #define BP BIT(29) | |
355 | ||
356 | /* MOVE Command specific flags */ | |
357 | #define WAITCOMP BIT(16) | |
358 | #define SIZE_WORD BIT(17) | |
359 | #define SIZE_BYTE BIT(18) | |
360 | #define SIZE_DWORD BIT(19) | |
361 | ||
362 | /* MATH command specific flags */ | |
363 | #define IFB MATH_IFB | |
364 | #define NFU MATH_NFU | |
365 | #define STL MATH_STL | |
366 | #define SSEL MATH_SSEL | |
367 | #define SWP MATH_SWP | |
368 | #define IMMED2 BIT(31) | |
369 | ||
370 | /** | |
371 | * struct program - descriptor buffer management structure | |
372 | * @current_pc: current offset in descriptor | |
373 | * @current_instruction: current instruction in descriptor | |
374 | * @first_error_pc: offset of the first error in descriptor | |
375 | * @start_pc: start offset in descriptor buffer | |
376 | * @buffer: buffer carrying descriptor | |
377 | * @shrhdr: shared descriptor header | |
378 | * @jobhdr: job descriptor header | |
379 | * @ps: pointer fields size; if ps is true, pointers will be 36bits in | |
380 | * length; if ps is false, pointers will be 32bits in length | |
381 | * @bswap: if true, perform byte swap on a 4-byte boundary | |
382 | */ | |
383 | struct program { | |
384 | unsigned int current_pc; | |
385 | unsigned int current_instruction; | |
386 | unsigned int first_error_pc; | |
387 | unsigned int start_pc; | |
388 | uint32_t *buffer; | |
389 | uint32_t *shrhdr; | |
390 | uint32_t *jobhdr; | |
391 | bool ps; | |
392 | bool bswap; | |
393 | }; | |
394 | ||
395 | static inline void | |
396 | rta_program_cntxt_init(struct program *program, | |
397 | uint32_t *buffer, unsigned int offset) | |
398 | { | |
399 | program->current_pc = 0; | |
400 | program->current_instruction = 0; | |
401 | program->first_error_pc = 0; | |
402 | program->start_pc = offset; | |
403 | program->buffer = buffer; | |
404 | program->shrhdr = NULL; | |
405 | program->jobhdr = NULL; | |
406 | program->ps = false; | |
407 | program->bswap = false; | |
408 | } | |
409 | ||
410 | static inline int | |
411 | rta_program_finalize(struct program *program) | |
412 | { | |
413 | /* Descriptor is usually not allowed to go beyond 64 words size */ | |
414 | if (program->current_pc > MAX_CAAM_DESCSIZE) | |
415 | pr_warn("Descriptor Size exceeded max limit of 64 words\n"); | |
416 | ||
417 | /* Descriptor is erroneous */ | |
418 | if (program->first_error_pc) { | |
419 | pr_err("Descriptor creation error\n"); | |
420 | return -EINVAL; | |
421 | } | |
422 | ||
423 | /* Update descriptor length in shared and job descriptor headers */ | |
424 | if (program->shrhdr != NULL) | |
425 | *program->shrhdr |= program->bswap ? | |
426 | swab32(program->current_pc) : | |
427 | program->current_pc; | |
428 | else if (program->jobhdr != NULL) | |
429 | *program->jobhdr |= program->bswap ? | |
430 | swab32(program->current_pc) : | |
431 | program->current_pc; | |
432 | ||
433 | return (int)program->current_pc; | |
434 | } | |
435 | ||
436 | static inline unsigned int | |
437 | rta_program_set_36bit_addr(struct program *program) | |
438 | { | |
439 | program->ps = true; | |
440 | return program->current_pc; | |
441 | } | |
442 | ||
443 | static inline unsigned int | |
444 | rta_program_set_bswap(struct program *program) | |
445 | { | |
446 | program->bswap = true; | |
447 | return program->current_pc; | |
448 | } | |
449 | ||
450 | static inline void | |
451 | __rta_out32(struct program *program, uint32_t val) | |
452 | { | |
453 | program->buffer[program->current_pc] = program->bswap ? | |
454 | swab32(val) : val; | |
455 | program->current_pc++; | |
456 | } | |
457 | ||
458 | static inline void | |
459 | __rta_out_be32(struct program *program, uint32_t val) | |
460 | { | |
461 | program->buffer[program->current_pc] = cpu_to_be32(val); | |
462 | program->current_pc++; | |
463 | } | |
464 | ||
465 | static inline void | |
466 | __rta_out_le32(struct program *program, uint32_t val) | |
467 | { | |
468 | program->buffer[program->current_pc] = cpu_to_le32(val); | |
469 | program->current_pc++; | |
470 | } | |
471 | ||
/*
 * Emit a 64-bit value into the descriptor buffer. When @is_ext is true, both
 * 32-bit halves are written (word order depends on CPU endianness and the
 * program's bswap setting); otherwise only the lower 32 bits are emitted.
 */
static inline void
__rta_out64(struct program *program, bool is_ext, uint64_t val)
{
	if (is_ext) {
		/*
		 * Since we are guaranteed only a 4-byte alignment in the
		 * descriptor buffer, we have to do 2 x 32-bit (word) writes.
		 * For the order of the 2 words to be correct, we need to
		 * take into account the endianness of the CPU.
		 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		__rta_out32(program, program->bswap ? lower_32_bits(val) :
						      upper_32_bits(val));

		__rta_out32(program, program->bswap ? upper_32_bits(val) :
						      lower_32_bits(val));
#else
		__rta_out32(program, program->bswap ? upper_32_bits(val) :
						      lower_32_bits(val));

		__rta_out32(program, program->bswap ? lower_32_bits(val) :
						      upper_32_bits(val));
#endif
	} else {
		__rta_out32(program, lower_32_bits(val));
	}
}
499 | ||
/* Emit a 64-bit value as big-endian words: high word first when extended */
static inline void __rta_out_be64(struct program *program, bool is_ext,
				  uint64_t val)
{
	if (!is_ext) {
		__rta_out_be32(program, lower_32_bits(val));
		return;
	}

	__rta_out_be32(program, upper_32_bits(val));
	__rta_out_be32(program, lower_32_bits(val));
}
510 | ||
/* Emit a 64-bit value as little-endian words: low word first when extended */
static inline void __rta_out_le64(struct program *program, bool is_ext,
				  uint64_t val)
{
	__rta_out_le32(program, lower_32_bits(val));

	if (is_ext)
		__rta_out_le32(program, upper_32_bits(val));
}
521 | ||
522 | static inline unsigned int |
523 | rta_word(struct program *program, uint32_t val) | |
524 | { | |
525 | unsigned int start_pc = program->current_pc; | |
526 | ||
527 | __rta_out32(program, val); | |
528 | ||
529 | return start_pc; | |
530 | } | |
531 | ||
532 | static inline unsigned int | |
533 | rta_dword(struct program *program, uint64_t val) | |
534 | { | |
535 | unsigned int start_pc = program->current_pc; | |
536 | ||
537 | __rta_out64(program, true, val); | |
538 | ||
539 | return start_pc; | |
540 | } | |
541 | ||
542 | static inline uint32_t | |
543 | inline_flags(enum rta_data_type data_type) | |
544 | { | |
545 | switch (data_type) { | |
546 | case RTA_DATA_PTR: | |
547 | return 0; | |
548 | case RTA_DATA_IMM: | |
549 | return IMMED | COPY; | |
550 | case RTA_DATA_IMM_DMA: | |
551 | return IMMED | DCOPY; | |
552 | default: | |
553 | /* warn and default to RTA_DATA_PTR */ | |
554 | pr_warn("RTA: defaulting to RTA_DATA_PTR parameter type\n"); | |
555 | return 0; | |
556 | } | |
557 | } | |
558 | ||
559 | static inline unsigned int | |
560 | rta_copy_data(struct program *program, uint8_t *data, unsigned int length) | |
561 | { | |
562 | unsigned int i; | |
563 | unsigned int start_pc = program->current_pc; | |
564 | uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc]; | |
565 | ||
566 | for (i = 0; i < length; i++) | |
567 | *tmp++ = data[i]; | |
568 | program->current_pc += (length + 3) / 4; | |
569 | ||
570 | return start_pc; | |
571 | } | |
572 | ||
#if defined(__EWL__) && defined(AIOP)
/* AIOP build: DMA @size bytes from external @ext_address into workspace */
static inline void
__rta_dma_data(void *ws_dst, uint64_t ext_address, uint16_t size)
{ cdma_read(ws_dst, ext_address, size); }
#else
/* Other builds: DCOPY unsupported — warn and skip the transfer entirely */
static inline void
__rta_dma_data(void *ws_dst __maybe_unused,
	       uint64_t ext_address __maybe_unused,
	       uint16_t size __maybe_unused)
{ pr_warn("RTA: DCOPY not supported, DMA will be skipped\n"); }
#endif /* defined(__EWL__) && defined(AIOP) */
584 | ||
585 | static inline void | |
586 | __rta_inline_data(struct program *program, uint64_t data, | |
587 | uint32_t copy_data, uint32_t length) | |
588 | { | |
589 | if (!copy_data) { | |
590 | __rta_out64(program, length > 4, data); | |
591 | } else if (copy_data & COPY) { | |
592 | uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc]; | |
593 | uint32_t i; | |
594 | ||
595 | for (i = 0; i < length; i++) | |
596 | *tmp++ = ((uint8_t *)(uintptr_t)data)[i]; | |
597 | program->current_pc += ((length + 3) / 4); | |
598 | } else if (copy_data & DCOPY) { | |
599 | __rta_dma_data(&program->buffer[program->current_pc], data, | |
600 | (uint16_t)length); | |
601 | program->current_pc += ((length + 3) / 4); | |
602 | } | |
603 | } | |
604 | ||
605 | static inline unsigned int | |
606 | rta_desc_len(uint32_t *buffer) | |
607 | { | |
608 | if ((*buffer & CMD_MASK) == CMD_DESC_HDR) | |
609 | return *buffer & HDR_DESCLEN_MASK; | |
610 | else | |
611 | return *buffer & HDR_DESCLEN_SHR_MASK; | |
612 | } | |
613 | ||
614 | static inline unsigned int | |
615 | rta_desc_bytes(uint32_t *buffer) | |
616 | { | |
617 | return (unsigned int)(rta_desc_len(buffer) * CAAM_CMD_SZ); | |
618 | } | |
619 | ||
620 | /** | |
621 | * split_key_len - Compute MDHA split key length for a given algorithm | |
622 | * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* or | |
623 | * OP_PCLID_DKP_* - MD5, SHA1, SHA224, SHA256, SHA384, SHA512. | |
624 | * | |
625 | * Return: MDHA split key length | |
626 | */ | |
627 | static inline uint32_t | |
628 | split_key_len(uint32_t hash) | |
629 | { | |
630 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | |
631 | static const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | |
632 | uint32_t idx; | |
633 | ||
634 | idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; | |
635 | ||
636 | return (uint32_t)(mdpadlen[idx] * 2); | |
637 | } | |
638 | ||
/**
 * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
 *        SHA224, SHA384, SHA512.
 *
 * Return: MDHA split key pad length
 */
static inline uint32_t
split_key_pad_len(uint32_t hash)
{
	uint32_t key_len = split_key_len(hash);

	/* Round the split key length up to a 16-byte boundary */
	return ALIGN(key_len, 16);
}
651 | ||
652 | static inline unsigned int | |
653 | rta_set_label(struct program *program) | |
654 | { | |
655 | return program->current_pc + program->start_pc; | |
656 | } | |
657 | ||
658 | static inline int | |
659 | rta_patch_move(struct program *program, int line, unsigned int new_ref) | |
660 | { | |
661 | uint32_t opcode; | |
662 | bool bswap = program->bswap; | |
663 | ||
664 | if (line < 0) | |
665 | return -EINVAL; | |
666 | ||
667 | opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line]; | |
668 | ||
669 | opcode &= (uint32_t)~MOVE_OFFSET_MASK; | |
670 | opcode |= (new_ref << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK; | |
671 | program->buffer[line] = bswap ? swab32(opcode) : opcode; | |
672 | ||
673 | return 0; | |
674 | } | |
675 | ||
676 | static inline int | |
677 | rta_patch_jmp(struct program *program, int line, unsigned int new_ref) | |
678 | { | |
679 | uint32_t opcode; | |
680 | bool bswap = program->bswap; | |
681 | ||
682 | if (line < 0) | |
683 | return -EINVAL; | |
684 | ||
685 | opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line]; | |
686 | ||
687 | opcode &= (uint32_t)~JUMP_OFFSET_MASK; | |
688 | opcode |= (new_ref - (line + program->start_pc)) & JUMP_OFFSET_MASK; | |
689 | program->buffer[line] = bswap ? swab32(opcode) : opcode; | |
690 | ||
691 | return 0; | |
692 | } | |
693 | ||
694 | static inline int | |
695 | rta_patch_header(struct program *program, int line, unsigned int new_ref) | |
696 | { | |
697 | uint32_t opcode; | |
698 | bool bswap = program->bswap; | |
699 | ||
700 | if (line < 0) | |
701 | return -EINVAL; | |
702 | ||
703 | opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line]; | |
704 | ||
705 | opcode &= (uint32_t)~HDR_START_IDX_MASK; | |
706 | opcode |= (new_ref << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK; | |
707 | program->buffer[line] = bswap ? swab32(opcode) : opcode; | |
708 | ||
709 | return 0; | |
710 | } | |
711 | ||
712 | static inline int | |
713 | rta_patch_load(struct program *program, int line, unsigned int new_ref) | |
714 | { | |
715 | uint32_t opcode; | |
716 | bool bswap = program->bswap; | |
717 | ||
718 | if (line < 0) | |
719 | return -EINVAL; | |
720 | ||
721 | opcode = (bswap ? swab32(program->buffer[line]) : | |
722 | program->buffer[line]) & (uint32_t)~LDST_OFFSET_MASK; | |
723 | ||
724 | if (opcode & (LDST_SRCDST_WORD_DESCBUF | LDST_CLASS_DECO)) | |
725 | opcode |= (new_ref << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK; | |
726 | else | |
727 | opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) & | |
728 | LDST_OFFSET_MASK; | |
729 | ||
730 | program->buffer[line] = bswap ? swab32(opcode) : opcode; | |
731 | ||
732 | return 0; | |
733 | } | |
734 | ||
735 | static inline int | |
736 | rta_patch_store(struct program *program, int line, unsigned int new_ref) | |
737 | { | |
738 | uint32_t opcode; | |
739 | bool bswap = program->bswap; | |
740 | ||
741 | if (line < 0) | |
742 | return -EINVAL; | |
743 | ||
744 | opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line]; | |
745 | ||
746 | opcode &= (uint32_t)~LDST_OFFSET_MASK; | |
747 | ||
748 | switch (opcode & LDST_SRCDST_MASK) { | |
749 | case LDST_SRCDST_WORD_DESCBUF: | |
750 | case LDST_SRCDST_WORD_DESCBUF_JOB: | |
751 | case LDST_SRCDST_WORD_DESCBUF_SHARED: | |
752 | case LDST_SRCDST_WORD_DESCBUF_JOB_WE: | |
753 | case LDST_SRCDST_WORD_DESCBUF_SHARED_WE: | |
754 | opcode |= ((new_ref) << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK; | |
755 | break; | |
756 | default: | |
757 | opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) & | |
758 | LDST_OFFSET_MASK; | |
759 | } | |
760 | ||
761 | program->buffer[line] = bswap ? swab32(opcode) : opcode; | |
762 | ||
763 | return 0; | |
764 | } | |
765 | ||
766 | static inline int | |
767 | rta_patch_raw(struct program *program, int line, unsigned int mask, | |
768 | unsigned int new_val) | |
769 | { | |
770 | uint32_t opcode; | |
771 | bool bswap = program->bswap; | |
772 | ||
773 | if (line < 0) | |
774 | return -EINVAL; | |
775 | ||
776 | opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line]; | |
777 | ||
778 | opcode &= (uint32_t)~mask; | |
779 | opcode |= new_val & mask; | |
780 | program->buffer[line] = bswap ? swab32(opcode) : opcode; | |
781 | ||
782 | return 0; | |
783 | } | |
784 | ||
/*
 * Look @name up in a two-column (name -> opcode) table. On a hit the opcode
 * is stored in *@val and 0 is returned; otherwise -EINVAL.
 */
static inline int
__rta_map_opcode(uint32_t name, const uint32_t (*map_table)[2],
		 unsigned int num_of_entries, uint32_t *val)
{
	unsigned int idx;

	/* Linear search; mapping tables are small */
	for (idx = 0; idx < num_of_entries; idx++) {
		if (map_table[idx][0] != name)
			continue;
		*val = map_table[idx][1];
		return 0;
	}

	return -EINVAL;
}
799 | ||
/*
 * For every user flag set in @flags, OR the matching opcode bits from the
 * two-column (flag -> opcode bits) table into *@opcode.
 */
static inline void
__rta_map_flags(uint32_t flags, const uint32_t (*flags_table)[2],
		unsigned int num_of_entries, uint32_t *opcode)
{
	unsigned int idx;

	for (idx = 0; idx < num_of_entries; idx++)
		if (flags_table[idx][0] & flags)
			*opcode |= flags_table[idx][1];
}
811 | ||
812 | #endif /* __RTA_SEC_RUN_TIME_ASM_H__ */ |