/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 *
 */

#ifndef __RTA_SEC_RUN_TIME_ASM_H__
#define __RTA_SEC_RUN_TIME_ASM_H__

#include "hw/desc.h"

/* hw/compat.h is not delivered in kernel */
#ifndef __KERNEL__
#include "hw/compat.h"
#endif

/**
 * enum rta_sec_era - SEC HW block revisions supported by the RTA library
 * @RTA_SEC_ERA_1: SEC Era 1
 * @RTA_SEC_ERA_2: SEC Era 2
 * @RTA_SEC_ERA_3: SEC Era 3
 * @RTA_SEC_ERA_4: SEC Era 4
 * @RTA_SEC_ERA_5: SEC Era 5
 * @RTA_SEC_ERA_6: SEC Era 6
 * @RTA_SEC_ERA_7: SEC Era 7
 * @RTA_SEC_ERA_8: SEC Era 8
 * @MAX_SEC_ERA: maximum SEC HW block revision supported by RTA library
 */
enum rta_sec_era {
        RTA_SEC_ERA_1,
        RTA_SEC_ERA_2,
        RTA_SEC_ERA_3,
        RTA_SEC_ERA_4,
        RTA_SEC_ERA_5,
        RTA_SEC_ERA_6,
        RTA_SEC_ERA_7,
        RTA_SEC_ERA_8,
        MAX_SEC_ERA = RTA_SEC_ERA_8
};

/**
 * DEFAULT_SEC_ERA - the default value for the SEC era in case the user provides
 * an unsupported value.
 */
#define DEFAULT_SEC_ERA MAX_SEC_ERA

/**
 * USER_SEC_ERA - translates the SEC Era from internal to user representation.
 * @sec_era: SEC Era in internal (library) representation
 */
#define USER_SEC_ERA(sec_era) (sec_era + 1)

/**
 * INTL_SEC_ERA - translates the SEC Era from user representation to internal.
 * @sec_era: SEC Era in user representation
 */
#define INTL_SEC_ERA(sec_era) (sec_era - 1)
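
/*
 * Illustrative translation example (not part of the original documentation):
 * the user-visible Era numbering starts at 1, the internal enum at 0, so
 * SEC Era 8 maps as follows:
 *
 *      enum rta_sec_era era = INTL_SEC_ERA(8);     // era == RTA_SEC_ERA_8
 *      unsigned int user_era = USER_SEC_ERA(era);  // user_era == 8
 */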

/**
 * enum rta_jump_type - Types of action taken by JUMP command
 * @LOCAL_JUMP: conditional jump to an offset within the descriptor buffer
 * @FAR_JUMP: conditional jump to a location outside the descriptor buffer,
 *            indicated by the POINTER field after the JUMP command.
 * @HALT: conditional halt - stops the execution of the current descriptor and
 *        writes PKHA / Math condition bits as status / error code.
 * @HALT_STATUS: conditional halt with user-specified status - stops the
 *               execution of the current descriptor and writes the value of
 *               the "LOCAL OFFSET" JUMP field as status / error code.
 * @GOSUB: conditional subroutine call - similar to @LOCAL_JUMP, but also saves
 *         the return address in the Return Address register; subroutine calls
 *         cannot be nested.
 * @RETURN: conditional subroutine return - similar to @LOCAL_JUMP, but the
 *          offset is taken from the Return Address register.
 * @LOCAL_JUMP_INC: similar to @LOCAL_JUMP, but increments the register
 *                  specified in the "SRC_DST" JUMP field before evaluating the
 *                  jump condition.
 * @LOCAL_JUMP_DEC: similar to @LOCAL_JUMP, but decrements the register
 *                  specified in the "SRC_DST" JUMP field before evaluating the
 *                  jump condition.
 */
enum rta_jump_type {
        LOCAL_JUMP,
        FAR_JUMP,
        HALT,
        HALT_STATUS,
        GOSUB,
        RETURN,
        LOCAL_JUMP_INC,
        LOCAL_JUMP_DEC
};

/**
 * enum rta_jump_cond - How test conditions are evaluated by JUMP command
 * @ALL_TRUE: perform action if ALL selected conditions are true
 * @ALL_FALSE: perform action if ALL selected conditions are false
 * @ANY_TRUE: perform action if ANY of the selected conditions is true
 * @ANY_FALSE: perform action if ANY of the selected conditions is false
 */
enum rta_jump_cond {
        ALL_TRUE,
        ALL_FALSE,
        ANY_TRUE,
        ANY_FALSE
};

/**
 * enum rta_share_type - Types of sharing for JOB_HDR and SHR_HDR commands
 * @SHR_NEVER: nothing is shared; descriptors can execute in parallel (i.e. no
 *             dependencies are allowed between them).
 * @SHR_WAIT: shared descriptor and keys are shared once the descriptor sets
 *            "OK to share" in DECO Control Register (DCTRL).
 * @SHR_SERIAL: shared descriptor and keys are shared once the descriptor has
 *              completed.
 * @SHR_ALWAYS: shared descriptor is shared anytime after the descriptor is
 *              loaded.
 * @SHR_DEFER: valid only for JOB_HDR; sharing type is the one specified
 *             in the shared descriptor associated with the job descriptor.
 */
enum rta_share_type {
        SHR_NEVER,
        SHR_WAIT,
        SHR_SERIAL,
        SHR_ALWAYS,
        SHR_DEFER
};

/**
 * enum rta_data_type - Indicates how the data is provided and how it is
 *                      included in the descriptor.
 * @RTA_DATA_PTR: Data is in memory and accessed by reference; data address is
 *                a physical (bus) address.
 * @RTA_DATA_IMM: Data is inlined in descriptor and accessed as immediate data;
 *                data address is a virtual address.
 * @RTA_DATA_IMM_DMA: (AIOP only) Data is inlined in descriptor and accessed as
 *                    immediate data; data address is a physical (bus) address
 *                    in external memory and CDMA is programmed to transfer the
 *                    data into the descriptor buffer being built in the
 *                    Workspace Area.
 */
enum rta_data_type {
        RTA_DATA_PTR = 1,
        RTA_DATA_IMM,
        RTA_DATA_IMM_DMA
};

/* Registers definitions */
enum rta_regs {
        /* CCB Registers */
        CONTEXT1 = 1,
        CONTEXT2,
        KEY1,
        KEY2,
        KEY1SZ,
        KEY2SZ,
        ICV1SZ,
        ICV2SZ,
        DATA1SZ,
        DATA2SZ,
        ALTDS1,
        IV1SZ,
        AAD1SZ,
        MODE1,
        MODE2,
        CCTRL,
        DCTRL,
        ICTRL,
        CLRW,
        CSTAT,
        IFIFO,
        NFIFO,
        OFIFO,
        PKASZ,
        PKBSZ,
        PKNSZ,
        PKESZ,
        /* DECO Registers */
        MATH0,
        MATH1,
        MATH2,
        MATH3,
        DESCBUF,
        JOBDESCBUF,
        SHAREDESCBUF,
        DPOVRD,
        DJQDA,
        DSTAT,
        DPID,
        DJQCTRL,
        ALTSOURCE,
        SEQINSZ,
        SEQOUTSZ,
        VSEQINSZ,
        VSEQOUTSZ,
        /* PKHA Registers */
        PKA,
        PKN,
        PKA0,
        PKA1,
        PKA2,
        PKA3,
        PKB,
        PKB0,
        PKB1,
        PKB2,
        PKB3,
        PKE,
        /* Pseudo registers */
        AB1,
        AB2,
        ABD,
        IFIFOABD,
        IFIFOAB1,
        IFIFOAB2,
        AFHA_SBOX,
        MDHA_SPLIT_KEY,
        JOBSRC,
        ZERO,
        ONE,
        AAD1,
        IV1,
        IV2,
        MSG1,
        MSG2,
        MSG,
        MSG_CKSUM,
        MSGOUTSNOOP,
        MSGINSNOOP,
        ICV1,
        ICV2,
        SKIP,
        NONE,
        RNGOFIFO,
        RNG,
        IDFNS,
        ODFNS,
        NFIFOSZ,
        SZ,
        PAD,
        SAD1,
        AAD2,
        BIT_DATA,
        NFIFO_SZL,
        NFIFO_SZM,
        NFIFO_L,
        NFIFO_M,
        SZL,
        SZM,
        JOBDESCBUF_EFF,
        SHAREDESCBUF_EFF,
        METADATA,
        GTR,
        STR,
        OFIFO_SYNC,
        MSGOUTSNOOP_ALT
};

/* Command flags */
#define FLUSH1 BIT(0)
#define LAST1 BIT(1)
#define LAST2 BIT(2)
#define IMMED BIT(3)
#define SGF BIT(4)
#define VLF BIT(5)
#define EXT BIT(6)
#define CONT BIT(7)
#define SEQ BIT(8)
#define AIDF BIT(9)
#define FLUSH2 BIT(10)
#define CLASS1 BIT(11)
#define CLASS2 BIT(12)
#define BOTH BIT(13)

/**
 * DCOPY - (AIOP only) command param is pointer to external memory
 *
 * CDMA must be used to transfer the key via DMA into Workspace Area.
 * Valid only in combination with IMMED flag.
 */
#define DCOPY BIT(30)

#define COPY BIT(31) /* command param is pointer (not immediate);
                      * valid only in combination with IMMED
                      */

#define __COPY_MASK (COPY | DCOPY)

/* SEQ IN/OUT PTR Command specific flags */
#define RBS BIT(16)
#define INL BIT(17)
#define PRE BIT(18)
#define RTO BIT(19)
#define RJD BIT(20)
#define SOP BIT(21)
#define RST BIT(22)
#define EWS BIT(23)

#define ENC BIT(14)     /* Encrypted Key */
#define EKT BIT(15)     /* AES CCM Encryption (default is
                         * AES ECB Encryption)
                         */
#define TK BIT(16)      /* Trusted Descriptor Key (default is
                         * Job Descriptor Key)
                         */
#define NWB BIT(17)     /* No Write Back Key */
#define PTS BIT(18)     /* Plaintext Store */

/* HEADER Command specific flags */
#define RIF BIT(16)
#define DNR BIT(17)
#define CIF BIT(18)
#define PD BIT(19)
#define RSMS BIT(20)
#define TD BIT(21)
#define MTD BIT(22)
#define REO BIT(23)
#define SHR BIT(24)
#define SC BIT(25)
/* Extended HEADER specific flags */
#define DSV BIT(7)
#define DSEL_MASK 0x00000007 /* DECO Select */
#define FTD BIT(8)

/* JUMP Command specific flags */
#define NIFP BIT(20)
#define NIP BIT(21)
#define NOP BIT(22)
#define NCP BIT(23)
#define CALM BIT(24)

#define MATH_Z BIT(25)
#define MATH_N BIT(26)
#define MATH_NV BIT(27)
#define MATH_C BIT(28)
#define PK_0 BIT(29)
#define PK_GCD_1 BIT(30)
#define PK_PRIME BIT(31)
#define SELF BIT(0)
#define SHRD BIT(1)
#define JQP BIT(2)

/* NFIFOADD specific flags */
#define PAD_ZERO BIT(16)
#define PAD_NONZERO BIT(17)
#define PAD_INCREMENT BIT(18)
#define PAD_RANDOM BIT(19)
#define PAD_ZERO_N1 BIT(20)
#define PAD_NONZERO_0 BIT(21)
#define PAD_N1 BIT(23)
#define PAD_NONZERO_N BIT(24)
#define OC BIT(25)
#define BM BIT(26)
#define PR BIT(27)
#define PS BIT(28)
#define BP BIT(29)

/* MOVE Command specific flags */
#define WAITCOMP BIT(16)
#define SIZE_WORD BIT(17)
#define SIZE_BYTE BIT(18)
#define SIZE_DWORD BIT(19)

/* MATH command specific flags */
#define IFB MATH_IFB
#define NFU MATH_NFU
#define STL MATH_STL
#define SSEL MATH_SSEL
#define SWP MATH_SWP
#define IMMED2 BIT(31)

/**
 * struct program - descriptor buffer management structure
 * @current_pc: current offset in descriptor
 * @current_instruction: current instruction in descriptor
 * @first_error_pc: offset of the first error in descriptor
 * @start_pc: start offset in descriptor buffer
 * @buffer: buffer carrying descriptor
 * @shrhdr: shared descriptor header
 * @jobhdr: job descriptor header
 * @ps: pointer field size; if true, pointers are 36 bits long, otherwise
 *      they are 32 bits long
 * @bswap: if true, perform byte swap on a 4-byte boundary
 */
struct program {
        unsigned int current_pc;
        unsigned int current_instruction;
        unsigned int first_error_pc;
        unsigned int start_pc;
        uint32_t *buffer;
        uint32_t *shrhdr;
        uint32_t *jobhdr;
        bool ps;
        bool bswap;
};

static inline void
rta_program_cntxt_init(struct program *program,
                       uint32_t *buffer, unsigned int offset)
{
        program->current_pc = 0;
        program->current_instruction = 0;
        program->first_error_pc = 0;
        program->start_pc = offset;
        program->buffer = buffer;
        program->shrhdr = NULL;
        program->jobhdr = NULL;
        program->ps = false;
        program->bswap = false;
}

static inline int
rta_program_finalize(struct program *program)
{
        /* A descriptor is usually not allowed to exceed 64 words */
        if (program->current_pc > MAX_CAAM_DESCSIZE)
                pr_warn("Descriptor Size exceeded max limit of 64 words\n");

        /* Descriptor is erroneous */
        if (program->first_error_pc) {
                pr_err("Descriptor creation error\n");
                return -EINVAL;
        }

        /* Update descriptor length in shared and job descriptor headers */
        if (program->shrhdr != NULL)
                *program->shrhdr |= program->bswap ?
                                        swab32(program->current_pc) :
                                        program->current_pc;
        else if (program->jobhdr != NULL)
                *program->jobhdr |= program->bswap ?
                                        swab32(program->current_pc) :
                                        program->current_pc;

        return (int)program->current_pc;
}
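
/*
 * Minimal build/finalize sketch (illustrative only; "desc" and "prg" are
 * caller-chosen names). Descriptor commands such as the SHR_HDR / JOB_HDR and
 * JUMP commands normally emit the words; plain rta_word()/rta_dword() stand in
 * for them here:
 *
 *      uint32_t desc[MAX_CAAM_DESCSIZE];
 *      struct program prg;
 *      int len;
 *
 *      rta_program_cntxt_init(&prg, desc, 0);
 *      // rta_program_set_36bit_addr(&prg) / rta_program_set_bswap(&prg)
 *      // may be called here if 36-bit pointers or byte swapping are needed.
 *      rta_word(&prg, 0xdeadbeef);              // emits one 32-bit word
 *      rta_dword(&prg, 0x0102030405060708ULL);  // emits two 32-bit words
 *      len = rta_program_finalize(&prg);        // >= 0: length in words
 */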

static inline unsigned int
rta_program_set_36bit_addr(struct program *program)
{
        program->ps = true;
        return program->current_pc;
}

static inline unsigned int
rta_program_set_bswap(struct program *program)
{
        program->bswap = true;
        return program->current_pc;
}

static inline void
__rta_out32(struct program *program, uint32_t val)
{
        program->buffer[program->current_pc] = program->bswap ?
                                                swab32(val) : val;
        program->current_pc++;
}

static inline void
__rta_out_be32(struct program *program, uint32_t val)
{
        program->buffer[program->current_pc] = cpu_to_be32(val);
        program->current_pc++;
}

static inline void
__rta_out_le32(struct program *program, uint32_t val)
{
        program->buffer[program->current_pc] = cpu_to_le32(val);
        program->current_pc++;
}

static inline void
__rta_out64(struct program *program, bool is_ext, uint64_t val)
{
        if (is_ext) {
                /*
                 * Since we are guaranteed only a 4-byte alignment in the
                 * descriptor buffer, we have to do 2 x 32-bit (word) writes.
                 * For the order of the 2 words to be correct, we need to
                 * take into account the endianness of the CPU.
                 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                __rta_out32(program, program->bswap ? lower_32_bits(val) :
                                                      upper_32_bits(val));

                __rta_out32(program, program->bswap ? upper_32_bits(val) :
                                                      lower_32_bits(val));
#else
                __rta_out32(program, program->bswap ? upper_32_bits(val) :
                                                      lower_32_bits(val));

                __rta_out32(program, program->bswap ? lower_32_bits(val) :
                                                      upper_32_bits(val));
#endif
        } else {
                __rta_out32(program, lower_32_bits(val));
        }
}
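
/*
 * Worked example for __rta_out64() (illustrative only): with is_ext = true
 * and val = 0x1122334455667788, a little-endian CPU stores the words
 * 0x55667788 then 0x11223344 when bswap is false, and swab32(0x11223344)
 * then swab32(0x55667788) when bswap is true; a big-endian CPU with
 * bswap false stores 0x11223344 then 0x55667788.
 */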

static inline unsigned int
rta_word(struct program *program, uint32_t val)
{
        unsigned int start_pc = program->current_pc;

        __rta_out32(program, val);

        return start_pc;
}

static inline unsigned int
rta_dword(struct program *program, uint64_t val)
{
        unsigned int start_pc = program->current_pc;

        __rta_out64(program, true, val);

        return start_pc;
}

static inline uint32_t
inline_flags(enum rta_data_type data_type)
{
        switch (data_type) {
        case RTA_DATA_PTR:
                return 0;
        case RTA_DATA_IMM:
                return IMMED | COPY;
        case RTA_DATA_IMM_DMA:
                return IMMED | DCOPY;
        default:
                /* warn and default to RTA_DATA_PTR */
                pr_warn("RTA: defaulting to RTA_DATA_PTR parameter type\n");
                return 0;
        }
}
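
/*
 * Illustrative mapping (not part of the original header):
 *
 *      inline_flags(RTA_DATA_PTR)     == 0
 *      inline_flags(RTA_DATA_IMM)     == (IMMED | COPY)
 *      inline_flags(RTA_DATA_IMM_DMA) == (IMMED | DCOPY)
 *
 * The result is typically passed as the flags of data-carrying commands and
 * is later consumed by __rta_inline_data() below.
 */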

static inline unsigned int
rta_copy_data(struct program *program, uint8_t *data, unsigned int length)
{
        unsigned int i;
        unsigned int start_pc = program->current_pc;
        uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];

        for (i = 0; i < length; i++)
                *tmp++ = data[i];
        program->current_pc += (length + 3) / 4;

        return start_pc;
}

#if defined(__EWL__) && defined(AIOP)
static inline void
__rta_dma_data(void *ws_dst, uint64_t ext_address, uint16_t size)
{ cdma_read(ws_dst, ext_address, size); }
#else
static inline void
__rta_dma_data(void *ws_dst __maybe_unused,
               uint64_t ext_address __maybe_unused,
               uint16_t size __maybe_unused)
{ pr_warn("RTA: DCOPY not supported, DMA will be skipped\n"); }
#endif /* defined(__EWL__) && defined(AIOP) */

static inline void
__rta_inline_data(struct program *program, uint64_t data,
                  uint32_t copy_data, uint32_t length)
{
        if (!copy_data) {
                __rta_out64(program, length > 4, data);
        } else if (copy_data & COPY) {
                uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
                uint32_t i;

                for (i = 0; i < length; i++)
                        *tmp++ = ((uint8_t *)(uintptr_t)data)[i];
                program->current_pc += ((length + 3) / 4);
        } else if (copy_data & DCOPY) {
                __rta_dma_data(&program->buffer[program->current_pc], data,
                               (uint16_t)length);
                program->current_pc += ((length + 3) / 4);
        }
}
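
/*
 * Illustrative sketch (key_buf, key_phys_addr, key_len and prg are assumed,
 * caller-provided names): inlining data into the descriptor versus
 * referencing it by address:
 *
 *      // key_buf is a virtual address; key_len bytes are copied into the
 *      // descriptor buffer as immediate data:
 *      __rta_inline_data(&prg, (uintptr_t)key_buf,
 *                        inline_flags(RTA_DATA_IMM), key_len);
 *
 *      // key_phys_addr is a physical (bus) address; the address itself is
 *      // emitted into the descriptor:
 *      __rta_inline_data(&prg, key_phys_addr,
 *                        inline_flags(RTA_DATA_PTR), key_len);
 */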

static inline unsigned int
rta_desc_len(uint32_t *buffer)
{
        if ((*buffer & CMD_MASK) == CMD_DESC_HDR)
                return *buffer & HDR_DESCLEN_MASK;
        else
                return *buffer & HDR_DESCLEN_SHR_MASK;
}

static inline unsigned int
rta_desc_bytes(uint32_t *buffer)
{
        return (unsigned int)(rta_desc_len(buffer) * CAAM_CMD_SZ);
}
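
/*
 * Illustrative note (not part of the original header): for a finalized
 * descriptor whose first word is a job or shared descriptor header, the size
 * can be read back directly from the buffer, e.g. before copying it to
 * hardware:
 *
 *      unsigned int words = rta_desc_len(desc);   // length in 32-bit words
 *      unsigned int bytes = rta_desc_bytes(desc); // words * CAAM_CMD_SZ
 */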

/**
 * split_key_len - Compute MDHA split key length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* or
 *        OP_PCLID_DKP_* - MD5, SHA1, SHA224, SHA256, SHA384, SHA512.
 *
 * Return: MDHA split key length
 */
static inline uint32_t
split_key_len(uint32_t hash)
{
        /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
        static const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        uint32_t idx;

        idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;

        return (uint32_t)(mdpadlen[idx] * 2);
}

/**
 * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
 * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
 *        SHA224, SHA256, SHA384, SHA512.
 *
 * Return: MDHA split key pad length
 */
static inline uint32_t
split_key_pad_len(uint32_t hash)
{
        return ALIGN(split_key_len(hash), 16);
}
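
/*
 * Worked example (illustrative only): for SHA1 the MDHA pad size is 20 bytes,
 * so split_key_len() returns 2 * 20 = 40 and split_key_pad_len() rounds this
 * up to the next 16-byte multiple, 48. For SHA256 the pad size is 32 bytes,
 * giving a split key length of 64, which is already 16-byte aligned.
 */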

static inline unsigned int
rta_set_label(struct program *program)
{
        return program->current_pc + program->start_pc;
}

static inline int
rta_patch_move(struct program *program, int line, unsigned int new_ref)
{
        uint32_t opcode;
        bool bswap = program->bswap;

        if (line < 0)
                return -EINVAL;

        opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];

        opcode &= (uint32_t)~MOVE_OFFSET_MASK;
        opcode |= (new_ref << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
        program->buffer[line] = bswap ? swab32(opcode) : opcode;

        return 0;
}

static inline int
rta_patch_jmp(struct program *program, int line, unsigned int new_ref)
{
        uint32_t opcode;
        bool bswap = program->bswap;

        if (line < 0)
                return -EINVAL;

        opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];

        opcode &= (uint32_t)~JUMP_OFFSET_MASK;
        opcode |= (new_ref - (line + program->start_pc)) & JUMP_OFFSET_MASK;
        program->buffer[line] = bswap ? swab32(opcode) : opcode;

        return 0;
}
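
/*
 * Typical forward-reference flow (illustrative sketch; "pjump" is assumed to
 * be the descriptor word index at which a JUMP command was emitted, e.g. the
 * value returned by the RTA JUMP command macro): emit the jump with a dummy
 * offset, then patch it once the target is known:
 *
 *      unsigned int ref = rta_set_label(&prg); // target: current_pc + start_pc
 *      rta_patch_jmp(&prg, pjump, ref);        // rewrite the jump offset
 */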

static inline int
rta_patch_header(struct program *program, int line, unsigned int new_ref)
{
        uint32_t opcode;
        bool bswap = program->bswap;

        if (line < 0)
                return -EINVAL;

        opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];

        opcode &= (uint32_t)~HDR_START_IDX_MASK;
        opcode |= (new_ref << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
        program->buffer[line] = bswap ? swab32(opcode) : opcode;

        return 0;
}

static inline int
rta_patch_load(struct program *program, int line, unsigned int new_ref)
{
        uint32_t opcode;
        bool bswap = program->bswap;

        if (line < 0)
                return -EINVAL;

        opcode = (bswap ? swab32(program->buffer[line]) :
                  program->buffer[line]) & (uint32_t)~LDST_OFFSET_MASK;

        if (opcode & (LDST_SRCDST_WORD_DESCBUF | LDST_CLASS_DECO))
                opcode |= (new_ref << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
        else
                opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
                          LDST_OFFSET_MASK;

        program->buffer[line] = bswap ? swab32(opcode) : opcode;

        return 0;
}

static inline int
rta_patch_store(struct program *program, int line, unsigned int new_ref)
{
        uint32_t opcode;
        bool bswap = program->bswap;

        if (line < 0)
                return -EINVAL;

        opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];

        opcode &= (uint32_t)~LDST_OFFSET_MASK;

        switch (opcode & LDST_SRCDST_MASK) {
        case LDST_SRCDST_WORD_DESCBUF:
        case LDST_SRCDST_WORD_DESCBUF_JOB:
        case LDST_SRCDST_WORD_DESCBUF_SHARED:
        case LDST_SRCDST_WORD_DESCBUF_JOB_WE:
        case LDST_SRCDST_WORD_DESCBUF_SHARED_WE:
                opcode |= ((new_ref) << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
                break;
        default:
                opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
                          LDST_OFFSET_MASK;
        }

        program->buffer[line] = bswap ? swab32(opcode) : opcode;

        return 0;
}

static inline int
rta_patch_raw(struct program *program, int line, unsigned int mask,
              unsigned int new_val)
{
        uint32_t opcode;
        bool bswap = program->bswap;

        if (line < 0)
                return -EINVAL;

        opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];

        opcode &= (uint32_t)~mask;
        opcode |= new_val & mask;
        program->buffer[line] = bswap ? swab32(opcode) : opcode;

        return 0;
}

static inline int
__rta_map_opcode(uint32_t name, const uint32_t (*map_table)[2],
                 unsigned int num_of_entries, uint32_t *val)
{
        unsigned int i;

        for (i = 0; i < num_of_entries; i++)
                if (map_table[i][0] == name) {
                        *val = map_table[i][1];
                        return 0;
                }

        return -EINVAL;
}

static inline void
__rta_map_flags(uint32_t flags, const uint32_t (*flags_table)[2],
                unsigned int num_of_entries, uint32_t *opcode)
{
        unsigned int i;

        for (i = 0; i < num_of_entries; i++) {
                if (flags_table[i][0] & flags)
                        *opcode |= flags_table[i][1];
        }
}
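
/*
 * Illustrative sketch (the table below is made up for the example; the real
 * tables live in the per-command headers of the RTA library): command
 * implementations keep {generic name or flag, SEC-specific encoding} pairs
 * and use the helpers above to translate them while building an opcode:
 *
 *      static const uint32_t example_map[][2] = {
 *              { MATH0, 0x01 },
 *              { MATH1, 0x02 },
 *      };
 *      uint32_t enc, opcode = 0;
 *
 *      if (!__rta_map_opcode(MATH0, example_map,
 *                            ARRAY_SIZE(example_map), &enc))
 *              opcode |= enc;  // enc == 0x01
 */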

#endif /* __RTA_SEC_RUN_TIME_ASM_H__ */