MdeModulePkg / Universal / EbcDxe / EbcExecute.c
1 /** @file
2 Contains code that implements the virtual machine.
3
4 Copyright (c) 2006, Intel Corporation
5 All rights reserved. This program and the accompanying materials
6 are licensed and made available under the terms and conditions of the BSD License
7 which accompanies this distribution. The full text of the license may be found at
8 http://opensource.org/licenses/bsd-license.php
9
10 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12
13 **/
14
15 #include "EbcInt.h"
16 #include "EbcExecute.h"
17
18
19 //
20 // Define some useful data size constants to allow switch statements based on
21 // size of operands or data.
22 //
23 #define DATA_SIZE_INVALID 0
24 #define DATA_SIZE_8 1
25 #define DATA_SIZE_16 2
26 #define DATA_SIZE_32 4
27 #define DATA_SIZE_64 8
28 #define DATA_SIZE_N 48 // 4 or 8
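//
// Note: DATA_SIZE_N only needs to be a value distinct from the other
// DATA_SIZE_* constants so it can be used as a switch case label; the
// actual natural width (4 or 8 bytes) comes from sizeof (UINTN) at the
// point of use.
//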
29 //
30 // Structure we'll use to dispatch opcodes to execute functions.
31 //
32 typedef struct {
33 EFI_STATUS (*ExecuteFunction) (IN VM_CONTEXT * VmPtr);
34 }
35 VM_TABLE_ENTRY;
36
37 typedef
38 UINT64
39 (*DATA_MANIP_EXEC_FUNCTION) (
40 IN VM_CONTEXT * VmPtr,
41 IN UINT64 Op1,
42 IN UINT64 Op2
43 );
44
45 STATIC
46 INT16
47 VmReadIndex16 (
48 IN VM_CONTEXT *VmPtr,
49 IN UINT32 CodeOffset
50 );
51
52 STATIC
53 INT32
54 VmReadIndex32 (
55 IN VM_CONTEXT *VmPtr,
56 IN UINT32 CodeOffset
57 );
58
59 STATIC
60 INT64
61 VmReadIndex64 (
62 IN VM_CONTEXT *VmPtr,
63 IN UINT32 CodeOffset
64 );
65
66 STATIC
67 UINT8
68 VmReadMem8 (
69 IN VM_CONTEXT *VmPtr,
70 IN UINTN Addr
71 );
72
73 STATIC
74 UINT16
75 VmReadMem16 (
76 IN VM_CONTEXT *VmPtr,
77 IN UINTN Addr
78 );
79
80 STATIC
81 UINT32
82 VmReadMem32 (
83 IN VM_CONTEXT *VmPtr,
84 IN UINTN Addr
85 );
86
87 STATIC
88 UINT64
89 VmReadMem64 (
90 IN VM_CONTEXT *VmPtr,
91 IN UINTN Addr
92 );
93
94 STATIC
95 UINTN
96 VmReadMemN (
97 IN VM_CONTEXT *VmPtr,
98 IN UINTN Addr
99 );
100
101 STATIC
102 EFI_STATUS
103 VmWriteMem8 (
104 IN VM_CONTEXT *VmPtr,
105 UINTN Addr,
106 IN UINT8 Data
107 );
108
109 STATIC
110 EFI_STATUS
111 VmWriteMem16 (
112 IN VM_CONTEXT *VmPtr,
113 UINTN Addr,
114 IN UINT16 Data
115 );
116
117 STATIC
118 EFI_STATUS
119 VmWriteMem32 (
120 IN VM_CONTEXT *VmPtr,
121 UINTN Addr,
122 IN UINT32 Data
123 );
124
125 STATIC
126 UINT16
127 VmReadCode16 (
128 IN VM_CONTEXT *VmPtr,
129 IN UINT32 Offset
130 );
131
132 STATIC
133 UINT32
134 VmReadCode32 (
135 IN VM_CONTEXT *VmPtr,
136 IN UINT32 Offset
137 );
138
139 STATIC
140 UINT64
141 VmReadCode64 (
142 IN VM_CONTEXT *VmPtr,
143 IN UINT32 Offset
144 );
145
146 STATIC
147 INT8
148 VmReadImmed8 (
149 IN VM_CONTEXT *VmPtr,
150 IN UINT32 Offset
151 );
152
153 STATIC
154 INT16
155 VmReadImmed16 (
156 IN VM_CONTEXT *VmPtr,
157 IN UINT32 Offset
158 );
159
160 STATIC
161 INT32
162 VmReadImmed32 (
163 IN VM_CONTEXT *VmPtr,
164 IN UINT32 Offset
165 );
166
167 STATIC
168 INT64
169 VmReadImmed64 (
170 IN VM_CONTEXT *VmPtr,
171 IN UINT32 Offset
172 );
173
174 STATIC
175 UINTN
176 ConvertStackAddr (
177 IN VM_CONTEXT *VmPtr,
178 IN UINTN Addr
179 );
180
181 STATIC
182 EFI_STATUS
183 ExecuteDataManip (
184 IN VM_CONTEXT *VmPtr,
185 IN BOOLEAN IsSignedOperation
186 );
187
188 //
189 // Functions that execute VM opcodes
190 //
191 STATIC
192 EFI_STATUS
193 ExecuteBREAK (
194 IN VM_CONTEXT *VmPtr
195 );
196
197 STATIC
198 EFI_STATUS
199 ExecuteJMP (
200 IN VM_CONTEXT *VmPtr
201 );
202
203 STATIC
204 EFI_STATUS
205 ExecuteJMP8 (
206 IN VM_CONTEXT *VmPtr
207 );
208
209 STATIC
210 EFI_STATUS
211 ExecuteCALL (
212 IN VM_CONTEXT *VmPtr
213 );
214
215 STATIC
216 EFI_STATUS
217 ExecuteRET (
218 IN VM_CONTEXT *VmPtr
219 );
220
221 STATIC
222 EFI_STATUS
223 ExecuteCMP (
224 IN VM_CONTEXT *VmPtr
225 );
226
227 STATIC
228 EFI_STATUS
229 ExecuteCMPI (
230 IN VM_CONTEXT *VmPtr
231 );
232
233 STATIC
234 EFI_STATUS
235 ExecuteMOVxx (
236 IN VM_CONTEXT *VmPtr
237 );
238
239 STATIC
240 EFI_STATUS
241 ExecuteMOVI (
242 IN VM_CONTEXT *VmPtr
243 );
244
245 STATIC
246 EFI_STATUS
247 ExecuteMOVIn (
248 IN VM_CONTEXT *VmPtr
249 );
250
251 STATIC
252 EFI_STATUS
253 ExecuteMOVREL (
254 IN VM_CONTEXT *VmPtr
255 );
256
257 STATIC
258 EFI_STATUS
259 ExecutePUSHn (
260 IN VM_CONTEXT *VmPtr
261 );
262
263 STATIC
264 EFI_STATUS
265 ExecutePUSH (
266 IN VM_CONTEXT *VmPtr
267 );
268
269 STATIC
270 EFI_STATUS
271 ExecutePOPn (
272 IN VM_CONTEXT *VmPtr
273 );
274
275 STATIC
276 EFI_STATUS
277 ExecutePOP (
278 IN VM_CONTEXT *VmPtr
279 );
280
281 STATIC
282 EFI_STATUS
283 ExecuteSignedDataManip (
284 IN VM_CONTEXT *VmPtr
285 );
286
287 STATIC
288 EFI_STATUS
289 ExecuteUnsignedDataManip (
290 IN VM_CONTEXT *VmPtr
291 );
292
293 STATIC
294 EFI_STATUS
295 ExecuteLOADSP (
296 IN VM_CONTEXT *VmPtr
297 );
298
299 STATIC
300 EFI_STATUS
301 ExecuteSTORESP (
302 IN VM_CONTEXT *VmPtr
303 );
304
305 STATIC
306 EFI_STATUS
307 ExecuteMOVsnd (
308 IN VM_CONTEXT *VmPtr
309 );
310
311 STATIC
312 EFI_STATUS
313 ExecuteMOVsnw (
314 IN VM_CONTEXT *VmPtr
315 );
316
317 //
318 // Data manipulation subfunctions
319 //
320 STATIC
321 UINT64
322 ExecuteNOT (
323 IN VM_CONTEXT *VmPtr,
324 IN UINT64 Op1,
325 IN UINT64 Op2
326 );
327
328 STATIC
329 UINT64
330 ExecuteNEG (
331 IN VM_CONTEXT *VmPtr,
332 IN UINT64 Op1,
333 IN UINT64 Op2
334 );
335
336 STATIC
337 UINT64
338 ExecuteADD (
339 IN VM_CONTEXT *VmPtr,
340 IN UINT64 Op1,
341 IN UINT64 Op2
342 );
343
344 STATIC
345 UINT64
346 ExecuteSUB (
347 IN VM_CONTEXT *VmPtr,
348 IN UINT64 Op1,
349 IN UINT64 Op2
350 );
351
352 STATIC
353 UINT64
354 ExecuteMUL (
355 IN VM_CONTEXT *VmPtr,
356 IN UINT64 Op1,
357 IN UINT64 Op2
358 );
359
360 STATIC
361 UINT64
362 ExecuteMULU (
363 IN VM_CONTEXT *VmPtr,
364 IN UINT64 Op1,
365 IN UINT64 Op2
366 );
367
368 STATIC
369 UINT64
370 ExecuteDIV (
371 IN VM_CONTEXT *VmPtr,
372 IN UINT64 Op1,
373 IN UINT64 Op2
374 );
375
376 STATIC
377 UINT64
378 ExecuteDIVU (
379 IN VM_CONTEXT *VmPtr,
380 IN UINT64 Op1,
381 IN UINT64 Op2
382 );
383
384 STATIC
385 UINT64
386 ExecuteMOD (
387 IN VM_CONTEXT *VmPtr,
388 IN UINT64 Op1,
389 IN UINT64 Op2
390 );
391
392 STATIC
393 UINT64
394 ExecuteMODU (
395 IN VM_CONTEXT *VmPtr,
396 IN UINT64 Op1,
397 IN UINT64 Op2
398 );
399
400 STATIC
401 UINT64
402 ExecuteAND (
403 IN VM_CONTEXT *VmPtr,
404 IN UINT64 Op1,
405 IN UINT64 Op2
406 );
407
408 STATIC
409 UINT64
410 ExecuteOR (
411 IN VM_CONTEXT *VmPtr,
412 IN UINT64 Op1,
413 IN UINT64 Op2
414 );
415
416 STATIC
417 UINT64
418 ExecuteXOR (
419 IN VM_CONTEXT *VmPtr,
420 IN UINT64 Op1,
421 IN UINT64 Op2
422 );
423
424 STATIC
425 UINT64
426 ExecuteSHL (
427 IN VM_CONTEXT *VmPtr,
428 IN UINT64 Op1,
429 IN UINT64 Op2
430 );
431
432 STATIC
433 UINT64
434 ExecuteSHR (
435 IN VM_CONTEXT *VmPtr,
436 IN UINT64 Op1,
437 IN UINT64 Op2
438 );
439
440 STATIC
441 UINT64
442 ExecuteASHR (
443 IN VM_CONTEXT *VmPtr,
444 IN UINT64 Op1,
445 IN UINT64 Op2
446 );
447
448 STATIC
449 UINT64
450 ExecuteEXTNDB (
451 IN VM_CONTEXT *VmPtr,
452 IN UINT64 Op1,
453 IN UINT64 Op2
454 );
455
456 STATIC
457 UINT64
458 ExecuteEXTNDW (
459 IN VM_CONTEXT *VmPtr,
460 IN UINT64 Op1,
461 IN UINT64 Op2
462 );
463
464 STATIC
465 UINT64
466 ExecuteEXTNDD (
467 IN VM_CONTEXT *VmPtr,
468 IN UINT64 Op1,
469 IN UINT64 Op2
470 );
471
472 //
473 // Once we retrieve the operands for the data manipulation instructions,
474 // call these functions to perform the operation.
475 //
476 static CONST DATA_MANIP_EXEC_FUNCTION mDataManipDispatchTable[] = {
477 ExecuteNOT,
478 ExecuteNEG,
479 ExecuteADD,
480 ExecuteSUB,
481 ExecuteMUL,
482 ExecuteMULU,
483 ExecuteDIV,
484 ExecuteDIVU,
485 ExecuteMOD,
486 ExecuteMODU,
487 ExecuteAND,
488 ExecuteOR,
489 ExecuteXOR,
490 ExecuteSHL,
491 ExecuteSHR,
492 ExecuteASHR,
493 ExecuteEXTNDB,
494 ExecuteEXTNDW,
495 ExecuteEXTNDD,
496 };
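//
// Entries are in opcode order, covering opcodes 0x0A (NOT) through
// 0x1C (EXTNDD) in the opcode table below.
//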
497
498 static CONST VM_TABLE_ENTRY mVmOpcodeTable[] = {
499 { ExecuteBREAK }, // opcode 0x00
500 { ExecuteJMP }, // opcode 0x01
501 { ExecuteJMP8 }, // opcode 0x02
502 { ExecuteCALL }, // opcode 0x03
503 { ExecuteRET }, // opcode 0x04
504 { ExecuteCMP }, // opcode 0x05 CMPeq
505 { ExecuteCMP }, // opcode 0x06 CMPlte
506 { ExecuteCMP }, // opcode 0x07 CMPgte
507 { ExecuteCMP }, // opcode 0x08 CMPulte
508 { ExecuteCMP }, // opcode 0x09 CMPugte
509 { ExecuteUnsignedDataManip }, // opcode 0x0A NOT
510 { ExecuteSignedDataManip }, // opcode 0x0B NEG
511 { ExecuteSignedDataManip }, // opcode 0x0C ADD
512 { ExecuteSignedDataManip }, // opcode 0x0D SUB
513 { ExecuteSignedDataManip }, // opcode 0x0E MUL
514 { ExecuteUnsignedDataManip }, // opcode 0x0F MULU
515 { ExecuteSignedDataManip }, // opcode 0x10 DIV
516 { ExecuteUnsignedDataManip }, // opcode 0x11 DIVU
517 { ExecuteSignedDataManip }, // opcode 0x12 MOD
518 { ExecuteUnsignedDataManip }, // opcode 0x13 MODU
519 { ExecuteUnsignedDataManip }, // opcode 0x14 AND
520 { ExecuteUnsignedDataManip }, // opcode 0x15 OR
521 { ExecuteUnsignedDataManip }, // opcode 0x16 XOR
522 { ExecuteUnsignedDataManip }, // opcode 0x17 SHL
523 { ExecuteUnsignedDataManip }, // opcode 0x18 SHR
524 { ExecuteSignedDataManip }, // opcode 0x19 ASHR
525 { ExecuteUnsignedDataManip }, // opcode 0x1A EXTNDB
526 { ExecuteUnsignedDataManip }, // opcode 0x1B EXTNDW
527 { ExecuteUnsignedDataManip }, // opcode 0x1C EXTNDD
528 { ExecuteMOVxx }, // opcode 0x1D MOVBW
529 { ExecuteMOVxx }, // opcode 0x1E MOVWW
530 { ExecuteMOVxx }, // opcode 0x1F MOVDW
531 { ExecuteMOVxx }, // opcode 0x20 MOVQW
532 { ExecuteMOVxx }, // opcode 0x21 MOVBD
533 { ExecuteMOVxx }, // opcode 0x22 MOVWD
534 { ExecuteMOVxx }, // opcode 0x23 MOVDD
535 { ExecuteMOVxx }, // opcode 0x24 MOVQD
536 { ExecuteMOVsnw }, // opcode 0x25 MOVsnw
537 { ExecuteMOVsnd }, // opcode 0x26 MOVsnd
538 { NULL }, // opcode 0x27
539 { ExecuteMOVxx }, // opcode 0x28 MOVqq
540 { ExecuteLOADSP }, // opcode 0x29 LOADSP SP1, R2
541 { ExecuteSTORESP }, // opcode 0x2A STORESP R1, SP2
542 { ExecutePUSH }, // opcode 0x2B PUSH {@}R1 [imm16]
543 { ExecutePOP }, // opcode 0x2C POP {@}R1 [imm16]
544 { ExecuteCMPI }, // opcode 0x2D CMPIEQ
545 { ExecuteCMPI }, // opcode 0x2E CMPILTE
546 { ExecuteCMPI }, // opcode 0x2F CMPIGTE
547 { ExecuteCMPI }, // opcode 0x30 CMPIULTE
548 { ExecuteCMPI }, // opcode 0x31 CMPIUGTE
549 { ExecuteMOVxx }, // opcode 0x32 MOVN
550 { ExecuteMOVxx }, // opcode 0x33 MOVND
551 { NULL }, // opcode 0x34
552 { ExecutePUSHn }, // opcode 0x35
553 { ExecutePOPn }, // opcode 0x36
554 { ExecuteMOVI }, // opcode 0x37 - mov immediate data
555 { ExecuteMOVIn }, // opcode 0x38 - mov immediate natural
556 { ExecuteMOVREL } // opcode 0x39 - move data relative to PC
557 };
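//
// The interpreter indexes this table with the opcode bits of the
// instruction's first byte (*VmPtr->Ip & OPCODE_M_OPCODE, the low six bits);
// a NULL entry results in an EXCEPT_EBC_INVALID_OPCODE exception.
//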
558
559 //
560 // Length of JMP instructions, depending on upper two bits of opcode.
561 //
562 static CONST UINT8 mJMPLen[] = { 2, 2, 6, 10 };
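//
// For example, ExecuteJMP derives its instruction length as:
//   Size = mJMPLen[(Opcode >> 6) & 0x03];
//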
563
564 //
565 // Simple Debugger Protocol GUID
566 //
567 EFI_GUID mEbcSimpleDebuggerProtocolGuid = EFI_EBC_SIMPLE_DEBUGGER_PROTOCOL_GUID;
568
569
570 /**
571 Given a pointer to a new VM context, execute one or more instructions. This
572 function is only used for test purposes via the EBC VM test protocol.
573
574 @param This pointer to protocol interface
575 @param VmPtr pointer to a VM context
576 @param InstructionCount how many instructions to execute; 0 means execute without counting.
577
578 @return EFI_UNSUPPORTED
579 @return EFI_SUCCESS
580
581 **/
582 EFI_STATUS
583 EbcExecuteInstructions (
584 IN EFI_EBC_VM_TEST_PROTOCOL *This,
585 IN VM_CONTEXT *VmPtr,
586 IN OUT UINTN *InstructionCount
587 )
588 {
589 UINTN ExecFunc;
590 EFI_STATUS Status;
591 UINTN InstructionsLeft;
592 UINTN SavedInstructionCount;
593
594 Status = EFI_SUCCESS;
595
596 if (*InstructionCount == 0) {
597 InstructionsLeft = 1;
598 } else {
599 InstructionsLeft = *InstructionCount;
600 }
601
602 SavedInstructionCount = *InstructionCount;
603 *InstructionCount = 0;
604
605 //
606 // Index into the opcode table using the opcode byte for this instruction.
607 // This gives you the execute function, which we first test for null, then
608 // call it if it's not null.
609 //
610 while (InstructionsLeft != 0) {
611 ExecFunc = (UINTN) mVmOpcodeTable[(*VmPtr->Ip & 0x3F)].ExecuteFunction;
612 if (ExecFunc == (UINTN) NULL) {
613 EbcDebugSignalException (EXCEPT_EBC_INVALID_OPCODE, EXCEPTION_FLAG_FATAL, VmPtr);
614 return EFI_UNSUPPORTED;
615 } else {
616 mVmOpcodeTable[(*VmPtr->Ip & 0x3F)].ExecuteFunction (VmPtr);
617 *InstructionCount = *InstructionCount + 1;
618 }
619
620 //
621 // Decrement counter if applicable
622 //
623 if (SavedInstructionCount != 0) {
624 InstructionsLeft--;
625 }
626 }
627
628 return Status;
629 }
630
631
632 /**
633 Execute an EBC image from an entry point or from a published protocol.
634
635 @param VmPtr pointer to prepared VM context.
636
637 @return Standard EBC status.
638
639 **/
640 EFI_STATUS
641 EbcExecute (
642 IN VM_CONTEXT *VmPtr
643 )
644 {
645 UINTN ExecFunc;
646 UINT8 StackCorrupted;
647 EFI_STATUS Status;
648 EFI_EBC_SIMPLE_DEBUGGER_PROTOCOL *EbcSimpleDebugger;
649
650 mVmPtr = VmPtr;
651 EbcSimpleDebugger = NULL;
652 Status = EFI_SUCCESS;
653 StackCorrupted = 0;
654
655 //
656 // Make sure the magic value has been put on the stack before we got here.
657 //
658 if (*VmPtr->StackMagicPtr != (UINTN) VM_STACK_KEY_VALUE) {
659 StackCorrupted = 1;
660 }
661
662 VmPtr->FramePtr = (VOID *) ((UINT8 *) (UINTN) VmPtr->R[0] + 8);
663
664 //
665 // Try to get the debug support for EBC
666 //
667 DEBUG_CODE_BEGIN ();
668 Status = gBS->LocateProtocol (
669 &mEbcSimpleDebuggerProtocolGuid,
670 NULL,
671 (VOID **) &EbcSimpleDebugger
672 );
673 if (EFI_ERROR (Status)) {
674 EbcSimpleDebugger = NULL;
675 }
676 DEBUG_CODE_END ();
677
678 //
679 // Save the start IP for debug. For example, if we take an exception we
680 // can print out the location of the exception relative to the entry point,
681 // which could then be used in a disassembly listing to find the problem.
682 //
683 VmPtr->EntryPoint = (VOID *) VmPtr->Ip;
684
685 //
686 // We'll wait for this flag to know when we're done. The RET
687 // instruction sets it when the application returns from its outermost stack frame.
688 //
689 VmPtr->StopFlags = 0;
690 while (!(VmPtr->StopFlags & STOPFLAG_APP_DONE)) {
691 //
692 // If we've found a simple debugger protocol, call it
693 //
694 DEBUG_CODE_BEGIN ();
695 if (EbcSimpleDebugger != NULL) {
696 EbcSimpleDebugger->Debugger (EbcSimpleDebugger, VmPtr);
697 }
698 DEBUG_CODE_END ();
699
700 //
701 // Verify the opcode is in range. Otherwise generate an exception.
702 //
703 if ((*VmPtr->Ip & OPCODE_M_OPCODE) >= (sizeof (mVmOpcodeTable) / sizeof (mVmOpcodeTable[0]))) {
704 EbcDebugSignalException (EXCEPT_EBC_INVALID_OPCODE, EXCEPTION_FLAG_FATAL, VmPtr);
705 Status = EFI_UNSUPPORTED;
706 goto Done;
707 }
708 //
709 // Use the opcode bits to index into the opcode dispatch table. If the
710 // function pointer is null then generate an exception.
711 //
712 ExecFunc = (UINTN) mVmOpcodeTable[(*VmPtr->Ip & OPCODE_M_OPCODE)].ExecuteFunction;
713 if (ExecFunc == (UINTN) NULL) {
714 EbcDebugSignalException (EXCEPT_EBC_INVALID_OPCODE, EXCEPTION_FLAG_FATAL, VmPtr);
715 Status = EFI_UNSUPPORTED;
716 goto Done;
717 }
718 //
719 // The EBC VM is a strongly ordered processor, so perform a fence operation before
720 // and after each instruction is executed.
721 //
722 MemoryFence ();
723
724 mVmOpcodeTable[(*VmPtr->Ip & OPCODE_M_OPCODE)].ExecuteFunction (VmPtr);
725
726 MemoryFence ();
727
728 //
729 // If the step flag is set, signal an exception and continue. We don't
730 // clear it here; we assume the debugger is responsible for clearing it.
731 //
732 if (VMFLAG_ISSET (VmPtr, VMFLAGS_STEP)) {
733 EbcDebugSignalException (EXCEPT_EBC_STEP, EXCEPTION_FLAG_NONE, VmPtr);
734 }
735 //
736 // Make sure stack has not been corrupted. Only report it once though.
737 //
738 if (!StackCorrupted && (*VmPtr->StackMagicPtr != (UINTN) VM_STACK_KEY_VALUE)) {
739 EbcDebugSignalException (EXCEPT_EBC_STACK_FAULT, EXCEPTION_FLAG_FATAL, VmPtr);
740 StackCorrupted = 1;
741 }
742 if (!StackCorrupted && ((UINT64)VmPtr->R[0] <= (UINT64)(UINTN) VmPtr->StackTop)) {
743 EbcDebugSignalException (EXCEPT_EBC_STACK_FAULT, EXCEPTION_FLAG_FATAL, VmPtr);
744 StackCorrupted = 1;
745 }
746 }
747
748 Done:
749 mVmPtr = NULL;
750
751 return Status;
752 }
753
754
755 /**
756 Execute the MOVxx instructions.
757
758 @param VmPtr pointer to a VM context.
759
760 @return EFI_UNSUPPORTED
761 @return EFI_SUCCESS
762 @return Instruction format:
763 @return MOV[b|w|d|q|n]{w|d} {@}R1 {Index16|32}, {@}R2 {Index16|32}
764 @return MOVqq {@}R1 {Index64}, {@}R2 {Index64}
765 @return Copies contents of [R2] -> [R1], zero extending where required.
766 @return First character indicates the size of the move.
767 @return Second character indicates the size of the index(es).
768 @return Invalid to have R1 direct with index.
769
770 **/
771 STATIC
772 EFI_STATUS
773 ExecuteMOVxx (
774 IN VM_CONTEXT *VmPtr
775 )
776 {
777 UINT8 Opcode;
778 UINT8 OpcMasked;
779 UINT8 Operands;
780 UINT8 Size;
781 UINT8 MoveSize;
782 INT16 Index16;
783 INT32 Index32;
784 INT64 Index64Op1;
785 INT64 Index64Op2;
786 UINT64 Data64;
787 UINT64 DataMask;
788 UINTN Source;
789
790 Opcode = GETOPCODE (VmPtr);
791 OpcMasked = (UINT8) (Opcode & OPCODE_M_OPCODE);
792
793 //
794 // Get the operands byte so we can get R1 and R2
795 //
796 Operands = GETOPERANDS (VmPtr);
797
798 //
799 // Assume no indexes
800 //
801 Index64Op1 = 0;
802 Index64Op2 = 0;
803 Data64 = 0;
804
805 //
806 // Determine if we have an index/immediate data. Base instruction size
807 // is 2 (opcode + operands). Add to this size each index specified.
808 //
809 Size = 2;
810 if (Opcode & (OPCODE_M_IMMED_OP1 | OPCODE_M_IMMED_OP2)) {
811 //
812 // Determine size of the index from the opcode. Then get it.
813 //
814 if ((OpcMasked <= OPCODE_MOVQW) || (OpcMasked == OPCODE_MOVNW)) {
815 //
816 // MOVBW, MOVWW, MOVDW, MOVQW, and MOVNW have 16-bit immediate index.
817 // Get one or both index values.
818 //
819 if (Opcode & OPCODE_M_IMMED_OP1) {
820 Index16 = VmReadIndex16 (VmPtr, 2);
821 Index64Op1 = (INT64) Index16;
822 Size += sizeof (UINT16);
823 }
824
825 if (Opcode & OPCODE_M_IMMED_OP2) {
826 Index16 = VmReadIndex16 (VmPtr, Size);
827 Index64Op2 = (INT64) Index16;
828 Size += sizeof (UINT16);
829 }
830 } else if ((OpcMasked <= OPCODE_MOVQD) || (OpcMasked == OPCODE_MOVND)) {
831 //
832 // MOVBD, MOVWD, MOVDD, MOVQD, and MOVND have 32-bit immediate index
833 //
834 if (Opcode & OPCODE_M_IMMED_OP1) {
835 Index32 = VmReadIndex32 (VmPtr, 2);
836 Index64Op1 = (INT64) Index32;
837 Size += sizeof (UINT32);
838 }
839
840 if (Opcode & OPCODE_M_IMMED_OP2) {
841 Index32 = VmReadIndex32 (VmPtr, Size);
842 Index64Op2 = (INT64) Index32;
843 Size += sizeof (UINT32);
844 }
845 } else if (OpcMasked == OPCODE_MOVQQ) {
846 //
847 // MOVqq -- only form with a 64-bit index
848 //
849 if (Opcode & OPCODE_M_IMMED_OP1) {
850 Index64Op1 = VmReadIndex64 (VmPtr, 2);
851 Size += sizeof (UINT64);
852 }
853
854 if (Opcode & OPCODE_M_IMMED_OP2) {
855 Index64Op2 = VmReadIndex64 (VmPtr, Size);
856 Size += sizeof (UINT64);
857 }
858 } else {
859 //
860 // Obsolete MOVBQ, MOVWQ, MOVDQ, and MOVNQ have 64-bit immediate index
861 //
862 EbcDebugSignalException (
863 EXCEPT_EBC_INSTRUCTION_ENCODING,
864 EXCEPTION_FLAG_FATAL,
865 VmPtr
866 );
867 return EFI_UNSUPPORTED;
868 }
869 }
870 //
871 // Determine the size of the move, and create a mask for it so we can
872 // clear unused bits.
873 //
874 if ((OpcMasked == OPCODE_MOVBW) || (OpcMasked == OPCODE_MOVBD)) {
875 MoveSize = DATA_SIZE_8;
876 DataMask = 0xFF;
877 } else if ((OpcMasked == OPCODE_MOVWW) || (OpcMasked == OPCODE_MOVWD)) {
878 MoveSize = DATA_SIZE_16;
879 DataMask = 0xFFFF;
880 } else if ((OpcMasked == OPCODE_MOVDW) || (OpcMasked == OPCODE_MOVDD)) {
881 MoveSize = DATA_SIZE_32;
882 DataMask = 0xFFFFFFFF;
883 } else if ((OpcMasked == OPCODE_MOVQW) || (OpcMasked == OPCODE_MOVQD) || (OpcMasked == OPCODE_MOVQQ)) {
884 MoveSize = DATA_SIZE_64;
885 DataMask = (UINT64)~0;
886 } else if ((OpcMasked == OPCODE_MOVNW) || (OpcMasked == OPCODE_MOVND)) {
887 MoveSize = DATA_SIZE_N;
888 DataMask = (UINT64)~0 >> (64 - 8 * sizeof (UINTN));
889 } else {
890 //
891 // We were dispatched to this function and we don't recognize the opcode
892 //
893 EbcDebugSignalException (EXCEPT_EBC_UNDEFINED, EXCEPTION_FLAG_FATAL, VmPtr);
894 return EFI_UNSUPPORTED;
895 }
896 //
897 // Now get the source address
898 //
899 if (OPERAND2_INDIRECT (Operands)) {
900 //
901 // Indirect form @R2. Compute address of operand2
902 //
903 Source = (UINTN) (VmPtr->R[OPERAND2_REGNUM (Operands)] + Index64Op2);
904 //
905 // Now get the data from the source. Always 0-extend and let the compiler
906 // sign-extend where required.
907 //
908 switch (MoveSize) {
909 case DATA_SIZE_8:
910 Data64 = (UINT64) (UINT8) VmReadMem8 (VmPtr, Source);
911 break;
912
913 case DATA_SIZE_16:
914 Data64 = (UINT64) (UINT16) VmReadMem16 (VmPtr, Source);
915 break;
916
917 case DATA_SIZE_32:
918 Data64 = (UINT64) (UINT32) VmReadMem32 (VmPtr, Source);
919 break;
920
921 case DATA_SIZE_64:
922 Data64 = (UINT64) VmReadMem64 (VmPtr, Source);
923 break;
924
925 case DATA_SIZE_N:
926 Data64 = (UINT64) (UINTN) VmReadMemN (VmPtr, Source);
927 break;
928
929 default:
930 //
931 // not reached
932 //
933 break;
934 }
935 } else {
936 //
937 // Not indirect source: MOVxx {@}Rx, Ry [Index]
938 //
939 Data64 = VmPtr->R[OPERAND2_REGNUM (Operands)] + Index64Op2;
940 //
941 // Did Operand2 have an index? If so, treat as two signed values since
942 // indexes are signed values.
943 //
944 if (Opcode & OPCODE_M_IMMED_OP2) {
945 //
946 // NOTE: need to find a way to fix this, most likely by changing the VM
947 // implementation to remove the stack gap. To do that, we'd need to
948 // allocate stack space for the VM and actually set the system
949 // stack pointer to the allocated buffer when the VM starts.
950 //
951 // Special case -- if someone took the address of a function parameter
952 // then we need to make sure it's not in the stack gap. We can identify
953 // this situation if (Operand2 register == 0) && (Operand2 is direct)
954 // && (Index applies to Operand2) && (Index > 0) && (Operand1 register != 0)
955 // Situations to be aware of:
956 // * stack adjustments at beginning and end of functions R0 = R0 += stacksize
957 //
958 if ((OPERAND2_REGNUM (Operands) == 0) &&
959 (!OPERAND2_INDIRECT (Operands)) &&
960 (Index64Op2 > 0) &&
961 (OPERAND1_REGNUM (Operands) == 0) &&
962 (OPERAND1_INDIRECT (Operands))
963 ) {
964 Data64 = (UINT64) ConvertStackAddr (VmPtr, (UINTN) (INT64) Data64);
965 }
966 }
967 }
968 //
969 // Now write it back
970 //
971 if (OPERAND1_INDIRECT (Operands)) {
972 //
973 // Reuse the Source variable to now be dest.
974 //
975 Source = (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Index64Op1);
976 //
977 // Do the write based on the size
978 //
979 switch (MoveSize) {
980 case DATA_SIZE_8:
981 VmWriteMem8 (VmPtr, Source, (UINT8) Data64);
982 break;
983
984 case DATA_SIZE_16:
985 VmWriteMem16 (VmPtr, Source, (UINT16) Data64);
986 break;
987
988 case DATA_SIZE_32:
989 VmWriteMem32 (VmPtr, Source, (UINT32) Data64);
990 break;
991
992 case DATA_SIZE_64:
993 VmWriteMem64 (VmPtr, Source, Data64);
994 break;
995
996 case DATA_SIZE_N:
997 VmWriteMemN (VmPtr, Source, (UINTN) Data64);
998 break;
999
1000 default:
1001 //
1002 // not reached
1003 //
1004 break;
1005 }
1006 } else {
1007 //
1008 // Operand1 direct.
1009 // Make sure we didn't have an index on operand1.
1010 //
1011 if (Opcode & OPCODE_M_IMMED_OP1) {
1012 EbcDebugSignalException (
1013 EXCEPT_EBC_INSTRUCTION_ENCODING,
1014 EXCEPTION_FLAG_FATAL,
1015 VmPtr
1016 );
1017 return EFI_UNSUPPORTED;
1018 }
1019 //
1020 // Direct storage in register. Clear unused bits and store back to
1021 // register.
1022 //
1023 VmPtr->R[OPERAND1_REGNUM (Operands)] = Data64 & DataMask;
1024 }
1025 //
1026 // Advance the instruction pointer
1027 //
1028 VmPtr->Ip += Size;
1029 return EFI_SUCCESS;
1030 }
1031
1032
1033 /**
1034 Execute the EBC BREAK instruction
1035
1036 @param VmPtr pointer to current VM context
1037
1038 @return EFI_UNSUPPORTED
1039 @return EFI_SUCCESS
1040
1041 **/
1042 STATIC
1043 EFI_STATUS
1044 ExecuteBREAK (
1045 IN VM_CONTEXT *VmPtr
1046 )
1047 {
1048 UINT8 Operands;
1049 VOID *EbcEntryPoint;
1050 VOID *Thunk;
1051 UINT64 U64EbcEntryPoint;
1052 INT32 Offset;
1053
1054 Operands = GETOPERANDS (VmPtr);
1055 switch (Operands) {
1056 //
1057 // Runaway program break. Generate an exception and terminate
1058 //
1059 case 0:
1060 EbcDebugSignalException (EXCEPT_EBC_BAD_BREAK, EXCEPTION_FLAG_FATAL, VmPtr);
1061 break;
1062
1063 //
1064 // Get VM version -- return VM revision number in R7
1065 //
1066 case 1:
1067 //
1068 // Bits:
1069 // 63-17 = 0
1070 // 16-8 = Major version
1071 // 7-0 = Minor version
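// For example, version 1.0 would be reported as 0x0100
// (major = 1 in bits 16-8, minor = 0 in bits 7-0).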
1072 //
1073 VmPtr->R[7] = GetVmVersion ();
1074 break;
1075
1076 //
1077 // Debugger breakpoint
1078 //
1079 case 3:
1080 VmPtr->StopFlags |= STOPFLAG_BREAKPOINT;
1081 //
1082 // See if someone has registered a handler
1083 //
1084 EbcDebugSignalException (
1085 EXCEPT_EBC_BREAKPOINT,
1086 EXCEPTION_FLAG_NONE,
1087 VmPtr
1088 );
1089 break;
1090
1091 //
1092 // System call. None are defined, so treat it as a NOP.
1093 //
1094 case 4:
1095 break;
1096
1097 //
1098 // Create a thunk for EBC code. R7 points to a 32-bit (in a 64-bit slot)
1099 // "offset from self" pointer to the EBC entry point.
1100 // After we're done, *(UINT64 *)R7 will be the address of the new thunk.
1101 //
1102 case 5:
1103 Offset = (INT32) VmReadMem32 (VmPtr, (UINTN) VmPtr->R[7]);
1104 U64EbcEntryPoint = (UINT64) (VmPtr->R[7] + Offset + 4);
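//
// Note: the "+ 4" skips the 32-bit offset field itself, so the offset
// appears to be measured from the end of that field.
//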
1105 EbcEntryPoint = (VOID *) (UINTN) U64EbcEntryPoint;
1106
1107 //
1108 // Now create a new thunk
1109 //
1110 EbcCreateThunks (VmPtr->ImageHandle, EbcEntryPoint, &Thunk, 0);
1111
1112 //
1113 // Finally replace the EBC entry point memory with the thunk address
1114 //
1115 VmWriteMem64 (VmPtr, (UINTN) VmPtr->R[7], (UINT64) (UINTN) Thunk);
1116 break;
1117
1118 //
1119 // Compiler setting version per value in R7
1120 //
1121 case 6:
1122 VmPtr->CompilerVersion = (UINT32) VmPtr->R[7];
1123 //
1124 // Check compiler version against VM version?
1125 //
1126 break;
1127
1128 //
1129 // Unhandled break code. Signal exception.
1130 //
1131 default:
1132 EbcDebugSignalException (EXCEPT_EBC_BAD_BREAK, EXCEPTION_FLAG_FATAL, VmPtr);
1133 break;
1134 }
1135 //
1136 // Advance IP
1137 //
1138 VmPtr->Ip += 2;
1139 return EFI_SUCCESS;
1140 }
1141
1142
1143 /**
1144 Execute the JMP instruction
1145
1146 @param VmPtr pointer to VM context
1147
1148 @return Standard EFI_STATUS
1149 @return Instruction syntax:
1150 @return JMP64{cs|cc} Immed64
1151 @return JMP32{cs|cc} {@}R1 {Immed32|Index32}
1152 @return Encoding:
1153 @retval b0.7 immediate data present
1154     @retval b0.6    1 = 64-bit immediate data, 0 = 32-bit immediate data
1155     @retval b1.7    1 = conditional;  b1.6: 1 = CS (condition set), 0 = CC (condition clear)
1156     @retval b1.4    1 = relative address, 0 = absolute address
1157     @retval b1.3    1 = operand1 indirect
1158     @retval b1.2-0  operand 1
1159
1160 **/
1161 STATIC
1162 EFI_STATUS
1163 ExecuteJMP (
1164 IN VM_CONTEXT *VmPtr
1165 )
1166 {
1167 UINT8 Opcode;
1168 UINT8 CompareSet;
1169 UINT8 ConditionFlag;
1170 UINT8 Size;
1171 UINT8 Operand;
1172 UINT64 Data64;
1173 INT32 Index32;
1174 UINTN Addr;
1175
1176 Operand = GETOPERANDS (VmPtr);
1177 Opcode = GETOPCODE (VmPtr);
1178
1179 //
1180 // Get instruction length from the opcode. The upper two bits are used here
1181 // to index into the length array.
1182 //
1183 Size = mJMPLen[(Opcode >> 6) & 0x03];
1184
1185 //
1186 // Decode instruction conditions
1187 // If we haven't met the condition, then simply advance the IP and return.
1188 //
1189 CompareSet = (UINT8) ((Operand & JMP_M_CS) ? 1 : 0);
1190 ConditionFlag = (UINT8) VMFLAG_ISSET (VmPtr, VMFLAGS_CC);
1191 if (Operand & CONDITION_M_CONDITIONAL) {
1192 if (CompareSet != ConditionFlag) {
1193 VmPtr->Ip += Size;
1194 return EFI_SUCCESS;
1195 }
1196 }
1197 //
1198 // Check for 64-bit form and do it right away since it's the most
1199 // straight-forward form.
1200 //
1201 if (Opcode & OPCODE_M_IMMDATA64) {
1202 //
1203 // Double check for immediate-data, which is required. If not there,
1204 // then signal an exception
1205 //
1206 if (!(Opcode & OPCODE_M_IMMDATA)) {
1207 EbcDebugSignalException (
1208 EXCEPT_EBC_INSTRUCTION_ENCODING,
1209 EXCEPTION_FLAG_ERROR,
1210 VmPtr
1211 );
1212 return EFI_UNSUPPORTED;
1213 }
1214 //
1215 // 64-bit immediate data is full address. Read the immediate data,
1216 // check for alignment, and jump absolute.
1217 //
1218 Data64 = VmReadImmed64 (VmPtr, 2);
1219 if (!IS_ALIGNED ((UINTN) Data64, sizeof (UINT16))) {
1220 EbcDebugSignalException (
1221 EXCEPT_EBC_ALIGNMENT_CHECK,
1222 EXCEPTION_FLAG_FATAL,
1223 VmPtr
1224 );
1225
1226 return EFI_UNSUPPORTED;
1227 }
1228
1229 //
1230 // Take jump -- relative or absolute
1231 //
1232 if (Operand & JMP_M_RELATIVE) {
1233 VmPtr->Ip += (UINTN) Data64 + Size;
1234 } else {
1235 VmPtr->Ip = (VMIP) (UINTN) Data64;
1236 }
1237
1238 return EFI_SUCCESS;
1239 }
1240 //
1241 // 32-bit forms:
1242 // Get the index if there is one. May be either an index, or an immediate
1243 // offset depending on indirect operand.
1244 // JMP32 @R1 Index32 -- immediate data is an index
1245 // JMP32 R1 Immed32 -- immediate data is an offset
1246 //
1247 if (Opcode & OPCODE_M_IMMDATA) {
1248 if (OPERAND1_INDIRECT (Operand)) {
1249 Index32 = VmReadIndex32 (VmPtr, 2);
1250 } else {
1251 Index32 = VmReadImmed32 (VmPtr, 2);
1252 }
1253 } else {
1254 Index32 = 0;
1255 }
1256 //
1257 // Get the register data. If R == 0, then special case where it's ignored.
1258 //
1259 if (OPERAND1_REGNUM (Operand) == 0) {
1260 Data64 = 0;
1261 } else {
1262 Data64 = OPERAND1_REGDATA (VmPtr, Operand);
1263 }
1264 //
1265 // Decode the forms
1266 //
1267 if (OPERAND1_INDIRECT (Operand)) {
1268 //
1269 // Form: JMP32 @Rx {Index32}
1270 //
1271 Addr = VmReadMemN (VmPtr, (UINTN) Data64 + Index32);
1272 if (!IS_ALIGNED ((UINTN) Addr, sizeof (UINT16))) {
1273 EbcDebugSignalException (
1274 EXCEPT_EBC_ALIGNMENT_CHECK,
1275 EXCEPTION_FLAG_FATAL,
1276 VmPtr
1277 );
1278
1279 return EFI_UNSUPPORTED;
1280 }
1281
1282 if (Operand & JMP_M_RELATIVE) {
1283 VmPtr->Ip += (UINTN) Addr + Size;
1284 } else {
1285 VmPtr->Ip = (VMIP) Addr;
1286 }
1287 } else {
1288 //
1289 // Form: JMP32 Rx {Immed32}
1290 //
1291 Addr = (UINTN) (Data64 + Index32);
1292 if (!IS_ALIGNED ((UINTN) Addr, sizeof (UINT16))) {
1293 EbcDebugSignalException (
1294 EXCEPT_EBC_ALIGNMENT_CHECK,
1295 EXCEPTION_FLAG_FATAL,
1296 VmPtr
1297 );
1298
1299 return EFI_UNSUPPORTED;
1300 }
1301
1302 if (Operand & JMP_M_RELATIVE) {
1303 VmPtr->Ip += (UINTN) Addr + Size;
1304 } else {
1305 VmPtr->Ip = (VMIP) Addr;
1306 }
1307 }
1308
1309 return EFI_SUCCESS;
1310 }
1311
1312
1313 /**
1314 Execute the EBC JMP8 instruction
1315
1316 @param VmPtr pointer to a VM context
1317
1318 @return Standard EFI_STATUS
1319 @return Instruction syntax:
1320 @return JMP8{cs|cc} Offset/2
1321
1322 **/
1323 STATIC
1324 EFI_STATUS
1325 ExecuteJMP8 (
1326 IN VM_CONTEXT *VmPtr
1327 )
1328 {
1329 UINT8 Opcode;
1330 UINT8 ConditionFlag;
1331 UINT8 CompareSet;
1332 INT8 Offset;
1333
1334 //
1335 // Decode instruction.
1336 //
1337 Opcode = GETOPCODE (VmPtr);
1338 CompareSet = (UINT8) ((Opcode & JMP_M_CS) ? 1 : 0);
1339 ConditionFlag = (UINT8) VMFLAG_ISSET (VmPtr, VMFLAGS_CC);
1340
1341 //
1342 // If we haven't met the condition, then simply advance the IP and return
1343 //
1344 if (Opcode & CONDITION_M_CONDITIONAL) {
1345 if (CompareSet != ConditionFlag) {
1346 VmPtr->Ip += 2;
1347 return EFI_SUCCESS;
1348 }
1349 }
1350 //
1351 // Get the offset from the instruction stream. It's relative to the
1352 // following instruction, and divided by 2.
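// For example, an encoded offset of 3 advances the IP by 3 * 2 + 2 = 8
// bytes past the start of this 2-byte instruction.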
1353 //
1354 Offset = VmReadImmed8 (VmPtr, 1);
1355 //
1356 // Want to check for offset == -2 and then raise an exception?
1357 //
1358 VmPtr->Ip += (Offset * 2) + 2;
1359 return EFI_SUCCESS;
1360 }
1361
1362
1363 /**
1364 Execute the EBC MOVI
1365
1366 @param VmPtr pointer to a VM context
1367
1368 @return Standard EFI_STATUS
1369 @return Instruction syntax:
1370 @return MOVI[b|w|d|q][w|d|q] {@}R1 {Index16}, ImmData16|32|64
1371 @return First variable character specifies the move size
1372 @return Second variable character specifies size of the immediate data
1373 @return Sign-extend the immediate data to the size of the operation, and zero-extend
1374 @return if storing to a register.
1375 @return Operand1 direct with index/immed is invalid.
1376
1377 **/
1378 STATIC
1379 EFI_STATUS
1380 ExecuteMOVI (
1381 IN VM_CONTEXT *VmPtr
1382 )
1383 {
1384 UINT8 Opcode;
1385 UINT8 Operands;
1386 UINT8 Size;
1387 INT16 Index16;
1388 INT64 ImmData64;
1389 UINT64 Op1;
1390 UINT64 Mask64;
1391
1392 //
1393 // Get the opcode and operands byte so we can get R1 and R2
1394 //
1395 Opcode = GETOPCODE (VmPtr);
1396 Operands = GETOPERANDS (VmPtr);
1397
1398 //
1399 // Get the index (16-bit) if present
1400 //
1401 if (Operands & MOVI_M_IMMDATA) {
1402 Index16 = VmReadIndex16 (VmPtr, 2);
1403 Size = 4;
1404 } else {
1405 Index16 = 0;
1406 Size = 2;
1407 }
1408 //
1409 // Extract the immediate data. Sign-extend always.
1410 //
1411 if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH16) {
1412 ImmData64 = (INT64) (INT16) VmReadImmed16 (VmPtr, Size);
1413 Size += 2;
1414 } else if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH32) {
1415 ImmData64 = (INT64) (INT32) VmReadImmed32 (VmPtr, Size);
1416 Size += 4;
1417 } else if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH64) {
1418 ImmData64 = (INT64) VmReadImmed64 (VmPtr, Size);
1419 Size += 8;
1420 } else {
1421 //
1422 // Invalid encoding
1423 //
1424 EbcDebugSignalException (
1425 EXCEPT_EBC_INSTRUCTION_ENCODING,
1426 EXCEPTION_FLAG_FATAL,
1427 VmPtr
1428 );
1429 return EFI_UNSUPPORTED;
1430 }
1431 //
1432 // Now write back the result
1433 //
1434 if (!OPERAND1_INDIRECT (Operands)) {
1435 //
1436 // Operand1 direct. Make sure it didn't have an index.
1437 //
1438 if (Operands & MOVI_M_IMMDATA) {
1439 EbcDebugSignalException (
1440 EXCEPT_EBC_INSTRUCTION_ENCODING,
1441 EXCEPTION_FLAG_FATAL,
1442 VmPtr
1443 );
1444 return EFI_UNSUPPORTED;
1445 }
1446 //
1447 // Writing directly to a register. Clear unused bits.
1448 //
1449 if ((Operands & MOVI_M_MOVEWIDTH) == MOVI_MOVEWIDTH8) {
1450 Mask64 = 0x000000FF;
1451 } else if ((Operands & MOVI_M_MOVEWIDTH) == MOVI_MOVEWIDTH16) {
1452 Mask64 = 0x0000FFFF;
1453 } else if ((Operands & MOVI_M_MOVEWIDTH) == MOVI_MOVEWIDTH32) {
1454 Mask64 = 0x00000000FFFFFFFF;
1455 } else {
1456 Mask64 = (UINT64)~0;
1457 }
1458
1459 VmPtr->R[OPERAND1_REGNUM (Operands)] = ImmData64 & Mask64;
1460 } else {
1461 //
1462 // Get the address then write back based on size of the move
1463 //
1464 Op1 = (UINT64) VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16;
1465 if ((Operands & MOVI_M_MOVEWIDTH) == MOVI_MOVEWIDTH8) {
1466 VmWriteMem8 (VmPtr, (UINTN) Op1, (UINT8) ImmData64);
1467 } else if ((Operands & MOVI_M_MOVEWIDTH) == MOVI_MOVEWIDTH16) {
1468 VmWriteMem16 (VmPtr, (UINTN) Op1, (UINT16) ImmData64);
1469 } else if ((Operands & MOVI_M_MOVEWIDTH) == MOVI_MOVEWIDTH32) {
1470 VmWriteMem32 (VmPtr, (UINTN) Op1, (UINT32) ImmData64);
1471 } else {
1472 VmWriteMem64 (VmPtr, (UINTN) Op1, ImmData64);
1473 }
1474 }
1475 //
1476 // Advance the instruction pointer
1477 //
1478 VmPtr->Ip += Size;
1479 return EFI_SUCCESS;
1480 }
1481
1482
1483 /**
1484 Execute the EBC MOV immediate natural. This instruction moves an immediate
1485 index value into a register or memory location.
1486
1487 @param VmPtr pointer to a VM context
1488
1489 @return Standard EFI_STATUS
1490 @return Instruction syntax:
1491 @return MOVIn[w|d|q] {@}R1 {Index16}, Index16|32|64
1492
1493 **/
1494 STATIC
1495 EFI_STATUS
1496 ExecuteMOVIn (
1497 IN VM_CONTEXT *VmPtr
1498 )
1499 {
1500 UINT8 Opcode;
1501 UINT8 Operands;
1502 UINT8 Size;
1503 INT16 Index16;
1504 INT16 ImmedIndex16;
1505 INT32 ImmedIndex32;
1506 INT64 ImmedIndex64;
1507 UINT64 Op1;
1508
1509 //
1510 // Get the opcode and operands byte so we can get R1 and R2
1511 //
1512 Opcode = GETOPCODE (VmPtr);
1513 Operands = GETOPERANDS (VmPtr);
1514
1515 //
1516 // Get the operand1 index (16-bit) if present
1517 //
1518 if (Operands & MOVI_M_IMMDATA) {
1519 Index16 = VmReadIndex16 (VmPtr, 2);
1520 Size = 4;
1521 } else {
1522 Index16 = 0;
1523 Size = 2;
1524 }
1525 //
1526 // Extract the immediate data and convert to a 64-bit index.
1527 //
1528 if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH16) {
1529 ImmedIndex16 = VmReadIndex16 (VmPtr, Size);
1530 ImmedIndex64 = (INT64) ImmedIndex16;
1531 Size += 2;
1532 } else if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH32) {
1533 ImmedIndex32 = VmReadIndex32 (VmPtr, Size);
1534 ImmedIndex64 = (INT64) ImmedIndex32;
1535 Size += 4;
1536 } else if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH64) {
1537 ImmedIndex64 = VmReadIndex64 (VmPtr, Size);
1538 Size += 8;
1539 } else {
1540 //
1541 // Invalid encoding
1542 //
1543 EbcDebugSignalException (
1544 EXCEPT_EBC_INSTRUCTION_ENCODING,
1545 EXCEPTION_FLAG_FATAL,
1546 VmPtr
1547 );
1548 return EFI_UNSUPPORTED;
1549 }
1550 //
1551 // Now write back the result
1552 //
1553 if (!OPERAND1_INDIRECT (Operands)) {
1554 //
1555 // Check for MOVIn R1 Index16, Immed (not indirect, with index), which
1556 // is illegal
1557 //
1558 if (Operands & MOVI_M_IMMDATA) {
1559 EbcDebugSignalException (
1560 EXCEPT_EBC_INSTRUCTION_ENCODING,
1561 EXCEPTION_FLAG_FATAL,
1562 VmPtr
1563 );
1564 return EFI_UNSUPPORTED;
1565 }
1566
1567 VmPtr->R[OPERAND1_REGNUM (Operands)] = ImmedIndex64;
1568 } else {
1569 //
1570 // Get the address
1571 //
1572 Op1 = (UINT64) VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16;
1573 VmWriteMemN (VmPtr, (UINTN) Op1, (INTN) ImmedIndex64);
1574 }
1575 //
1576 // Advance the instruction pointer
1577 //
1578 VmPtr->Ip += Size;
1579 return EFI_SUCCESS;
1580 }
1581
1582
1583 /**
1584 Execute the EBC MOVREL instruction.
1585 Dest <- Ip + ImmData
1586
1587 @param VmPtr pointer to a VM context
1588
1589 @return Standard EFI_STATUS
1590 @return Instruction syntax:
1591 @return MOVREL[w|d|q] {@}R1 {Index16}, ImmData16|32|64
1592
1593 **/
1594 STATIC
1595 EFI_STATUS
1596 ExecuteMOVREL (
1597 IN VM_CONTEXT *VmPtr
1598 )
1599 {
1600 UINT8 Opcode;
1601 UINT8 Operands;
1602 UINT8 Size;
1603 INT16 Index16;
1604 INT64 ImmData64;
1605 UINT64 Op1;
1606 UINT64 Op2;
1607
1608 //
1609 // Get the opcode and operands byte so we can get R1 and R2
1610 //
1611 Opcode = GETOPCODE (VmPtr);
1612 Operands = GETOPERANDS (VmPtr);
1613
1614 //
1615 // Get the Operand 1 index (16-bit) if present
1616 //
1617 if (Operands & MOVI_M_IMMDATA) {
1618 Index16 = VmReadIndex16 (VmPtr, 2);
1619 Size = 4;
1620 } else {
1621 Index16 = 0;
1622 Size = 2;
1623 }
1624 //
1625 // Get the immediate data.
1626 //
1627 if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH16) {
1628 ImmData64 = (INT64) VmReadImmed16 (VmPtr, Size);
1629 Size += 2;
1630 } else if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH32) {
1631 ImmData64 = (INT64) VmReadImmed32 (VmPtr, Size);
1632 Size += 4;
1633 } else if ((Opcode & MOVI_M_DATAWIDTH) == MOVI_DATAWIDTH64) {
1634 ImmData64 = VmReadImmed64 (VmPtr, Size);
1635 Size += 8;
1636 } else {
1637 //
1638 // Invalid encoding
1639 //
1640 EbcDebugSignalException (
1641 EXCEPT_EBC_INSTRUCTION_ENCODING,
1642 EXCEPTION_FLAG_FATAL,
1643 VmPtr
1644 );
1645 return EFI_UNSUPPORTED;
1646 }
1647 //
1648 // Compute the value and write back the result
1649 //
1650 Op2 = (UINT64) ((INT64) ((UINT64) (UINTN) VmPtr->Ip) + (INT64) ImmData64 + Size);
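//
// Note that Size is included in the sum above, so the result is relative
// to the start of the following instruction (Ip + Size), not the current IP.
//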
1651 if (!OPERAND1_INDIRECT (Operands)) {
1652 //
1653 // Check for illegal combination of operand1 direct with immediate data
1654 //
1655 if (Operands & MOVI_M_IMMDATA) {
1656 EbcDebugSignalException (
1657 EXCEPT_EBC_INSTRUCTION_ENCODING,
1658 EXCEPTION_FLAG_FATAL,
1659 VmPtr
1660 );
1661 return EFI_UNSUPPORTED;
1662 }
1663
1664 VmPtr->R[OPERAND1_REGNUM (Operands)] = (VM_REGISTER) Op2;
1665 } else {
1666 //
1667 // Get the address = [Rx] + Index16
1668 // Write back the result. Always a natural size write, since
1669 // we're talking addresses here.
1670 //
1671 Op1 = (UINT64) VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16;
1672 VmWriteMemN (VmPtr, (UINTN) Op1, (UINTN) Op2);
1673 }
1674 //
1675 // Advance the instruction pointer
1676 //
1677 VmPtr->Ip += Size;
1678 return EFI_SUCCESS;
1679 }
1680
1681
1682 /**
1683 Execute the EBC MOVsnw instruction. This instruction loads a signed
1684 natural value from memory or register to another memory or register. On
1685 32-bit machines, the value gets sign-extended to 64 bits if the destination
1686 is a register.
1687
1688 @param VmPtr pointer to a VM context
1689
1690 @return Standard EFI_STATUS
1691 @return Instruction syntax:
1692 @return MOVsnw {@}R1 {Index16}, {@}R2 {Index16|Immed16}
1693 @return 0:7 1=>operand1 index present
1694 @return 0:6 1=>operand2 index present
1695
1696 **/
1697 STATIC
1698 EFI_STATUS
1699 ExecuteMOVsnw (
1700 IN VM_CONTEXT *VmPtr
1701 )
1702 {
1703 UINT8 Opcode;
1704 UINT8 Operands;
1705 UINT8 Size;
1706 INT16 Op1Index;
1707 INT16 Op2Index;
1708 UINT64 Op2;
1709
1710 //
1711 // Get the opcode and operand bytes
1712 //
1713 Opcode = GETOPCODE (VmPtr);
1714 Operands = GETOPERANDS (VmPtr);
1715
1716 Op1Index = Op2Index = 0;
1717
1718 //
1719 // Get the indexes if present.
1720 //
1721 Size = 2;
1722 if (Opcode & OPCODE_M_IMMED_OP1) {
1723 if (OPERAND1_INDIRECT (Operands)) {
1724 Op1Index = VmReadIndex16 (VmPtr, 2);
1725 } else {
1726 //
1727 // Illegal form operand1 direct with index: MOVsnw R1 Index16, {@}R2
1728 //
1729 EbcDebugSignalException (
1730 EXCEPT_EBC_INSTRUCTION_ENCODING,
1731 EXCEPTION_FLAG_FATAL,
1732 VmPtr
1733 );
1734 return EFI_UNSUPPORTED;
1735 }
1736
1737 Size += sizeof (UINT16);
1738 }
1739
1740 if (Opcode & OPCODE_M_IMMED_OP2) {
1741 if (OPERAND2_INDIRECT (Operands)) {
1742 Op2Index = VmReadIndex16 (VmPtr, Size);
1743 } else {
1744 Op2Index = VmReadImmed16 (VmPtr, Size);
1745 }
1746
1747 Size += sizeof (UINT16);
1748 }
1749 //
1750 // Get the data from the source.
1751 //
1752 Op2 = (INT64) ((INTN) (VmPtr->R[OPERAND2_REGNUM (Operands)] + Op2Index));
1753 if (OPERAND2_INDIRECT (Operands)) {
1754 Op2 = (INT64) (INTN) VmReadMemN (VmPtr, (UINTN) Op2);
1755 }
1756 //
1757 // Now write back the result.
1758 //
1759 if (!OPERAND1_INDIRECT (Operands)) {
1760 VmPtr->R[OPERAND1_REGNUM (Operands)] = Op2;
1761 } else {
1762 VmWriteMemN (VmPtr, (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Op1Index), (UINTN) Op2);
1763 }
1764 //
1765 // Advance the instruction pointer
1766 //
1767 VmPtr->Ip += Size;
1768 return EFI_SUCCESS;
1769 }
1770
1771
1772 /**
1773 Execute the EBC MOVsnd instruction. This instruction loads a signed
1774 natural value from memory or register to another memory or register. On
1775 32-bit machines, the value gets sign-extended to 64 bits if the destination
1776 is a register.
1777
1778 @param VmPtr pointer to a VM context
1779
1780 @return Standard EFI_STATUS
1781 @return Instruction syntax:
1782 @return MOVsnd {@}R1 {Index32}, {@}R2 {Index32|Immed32}
1783 @return 0:7 1=>operand1 index present
1784 @return 0:6 1=>operand2 index present
1785
1786 **/
1787 STATIC
1788 EFI_STATUS
1789 ExecuteMOVsnd (
1790 IN VM_CONTEXT *VmPtr
1791 )
1792 {
1793 UINT8 Opcode;
1794 UINT8 Operands;
1795 UINT8 Size;
1796 INT32 Op1Index;
1797 INT32 Op2Index;
1798 UINT64 Op2;
1799
1800 //
1801 // Get the opcode and operand bytes
1802 //
1803 Opcode = GETOPCODE (VmPtr);
1804 Operands = GETOPERANDS (VmPtr);
1805
1806 Op1Index = Op2Index = 0;
1807
1808 //
1809 // Get the indexes if present.
1810 //
1811 Size = 2;
1812 if (Opcode & OPCODE_M_IMMED_OP1) {
1813 if (OPERAND1_INDIRECT (Operands)) {
1814 Op1Index = VmReadIndex32 (VmPtr, 2);
1815 } else {
1816 //
1817 // Illegal form operand1 direct with index: MOVsnd R1 Index16,..
1818 //
1819 EbcDebugSignalException (
1820 EXCEPT_EBC_INSTRUCTION_ENCODING,
1821 EXCEPTION_FLAG_FATAL,
1822 VmPtr
1823 );
1824 return EFI_UNSUPPORTED;
1825 }
1826
1827 Size += sizeof (UINT32);
1828 }
1829
1830 if (Opcode & OPCODE_M_IMMED_OP2) {
1831 if (OPERAND2_INDIRECT (Operands)) {
1832 Op2Index = VmReadIndex32 (VmPtr, Size);
1833 } else {
1834 Op2Index = VmReadImmed32 (VmPtr, Size);
1835 }
1836
1837 Size += sizeof (UINT32);
1838 }
1839 //
1840 // Get the data from the source.
1841 //
1842 Op2 = (INT64) ((INTN) (VmPtr->R[OPERAND2_REGNUM (Operands)] + Op2Index));
1843 if (OPERAND2_INDIRECT (Operands)) {
1844 Op2 = (INT64) (INTN) VmReadMemN (VmPtr, (UINTN) Op2);
1845 }
1846 //
1847 // Now write back the result.
1848 //
1849 if (!OPERAND1_INDIRECT (Operands)) {
1850 VmPtr->R[OPERAND1_REGNUM (Operands)] = Op2;
1851 } else {
1852 VmWriteMemN (VmPtr, (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Op1Index), (UINTN) Op2);
1853 }
1854 //
1855 // Advance the instruction pointer
1856 //
1857 VmPtr->Ip += Size;
1858 return EFI_SUCCESS;
1859 }
1860
1861
1862 /**
1863 Execute the EBC PUSHn instruction
1864
1865 @param VmPtr pointer to a VM context
1866
1867 @return Standard EFI_STATUS
1868 @return Instruction syntax:
1869 @return PUSHn {@}R1 {Index16|Immed16}
1870
1871 **/
1872 STATIC
1873 EFI_STATUS
1874 ExecutePUSHn (
1875 IN VM_CONTEXT *VmPtr
1876 )
1877 {
1878 UINT8 Opcode;
1879 UINT8 Operands;
1880 INT16 Index16;
1881 UINTN DataN;
1882
1883 //
1884 // Get opcode and operands
1885 //
1886 Opcode = GETOPCODE (VmPtr);
1887 Operands = GETOPERANDS (VmPtr);
1888
1889 //
1890 // Get index if present
1891 //
1892 if (Opcode & PUSHPOP_M_IMMDATA) {
1893 if (OPERAND1_INDIRECT (Operands)) {
1894 Index16 = VmReadIndex16 (VmPtr, 2);
1895 } else {
1896 Index16 = VmReadImmed16 (VmPtr, 2);
1897 }
1898
1899 VmPtr->Ip += 4;
1900 } else {
1901 Index16 = 0;
1902 VmPtr->Ip += 2;
1903 }
1904 //
1905 // Get the data to push
1906 //
1907 if (OPERAND1_INDIRECT (Operands)) {
1908 DataN = VmReadMemN (VmPtr, (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16));
1909 } else {
1910 DataN = (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16);
1911 }
1912 //
1913 // Adjust the stack down.
1914 //
1915 VmPtr->R[0] -= sizeof (UINTN);
1916 VmWriteMemN (VmPtr, (UINTN) VmPtr->R[0], DataN);
1917 return EFI_SUCCESS;
1918 }
1919
1920
1921 /**
1922 Execute the EBC PUSH instruction
1923
1924 @param VmPtr pointer to a VM context
1925
1926 @return Standard EFI_STATUS
1927 @return Instruction syntax:
1928 @return PUSH[32|64] {@}R1 {Index16|Immed16}
1929
1930 **/
1931 STATIC
1932 EFI_STATUS
1933 ExecutePUSH (
1934 IN VM_CONTEXT *VmPtr
1935 )
1936 {
1937 UINT8 Opcode;
1938 UINT8 Operands;
1939 UINT32 Data32;
1940 UINT64 Data64;
1941 INT16 Index16;
1942
1943 //
1944 // Get opcode and operands
1945 //
1946 Opcode = GETOPCODE (VmPtr);
1947 Operands = GETOPERANDS (VmPtr);
1948 //
1949 // Get immediate index if present, then advance the IP.
1950 //
1951 if (Opcode & PUSHPOP_M_IMMDATA) {
1952 if (OPERAND1_INDIRECT (Operands)) {
1953 Index16 = VmReadIndex16 (VmPtr, 2);
1954 } else {
1955 Index16 = VmReadImmed16 (VmPtr, 2);
1956 }
1957
1958 VmPtr->Ip += 4;
1959 } else {
1960 Index16 = 0;
1961 VmPtr->Ip += 2;
1962 }
1963 //
1964 // Get the data to push
1965 //
1966 if (Opcode & PUSHPOP_M_64) {
1967 if (OPERAND1_INDIRECT (Operands)) {
1968 Data64 = VmReadMem64 (VmPtr, (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16));
1969 } else {
1970 Data64 = (UINT64) VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16;
1971 }
1972 //
1973 // Adjust the stack down, then write back the data
1974 //
1975 VmPtr->R[0] -= sizeof (UINT64);
1976 VmWriteMem64 (VmPtr, (UINTN) VmPtr->R[0], Data64);
1977 } else {
1978 //
1979 // 32-bit data
1980 //
1981 if (OPERAND1_INDIRECT (Operands)) {
1982 Data32 = VmReadMem32 (VmPtr, (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16));
1983 } else {
1984 Data32 = (UINT32) VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16;
1985 }
1986 //
1987 // Adjust the stack down and write the data
1988 //
1989 VmPtr->R[0] -= sizeof (UINT32);
1990 VmWriteMem32 (VmPtr, (UINTN) VmPtr->R[0], Data32);
1991 }
1992
1993 return EFI_SUCCESS;
1994 }
1995
1996
1997 /**
1998 Execute the EBC POPn instruction
1999
2000 @param VmPtr pointer to a VM context
2001
2002 @return Standard EFI_STATUS
2003 @return Instruction syntax:
2004 @return POPn {@}R1 {Index16|Immed16}
2005
2006 **/
2007 STATIC
2008 EFI_STATUS
2009 ExecutePOPn (
2010 IN VM_CONTEXT *VmPtr
2011 )
2012 {
2013 UINT8 Opcode;
2014 UINT8 Operands;
2015 INT16 Index16;
2016 UINTN DataN;
2017
2018 //
2019 // Get opcode and operands
2020 //
2021 Opcode = GETOPCODE (VmPtr);
2022 Operands = GETOPERANDS (VmPtr);
2023 //
2024 // Get immediate data if present, and advance the IP
2025 //
2026 if (Opcode & PUSHPOP_M_IMMDATA) {
2027 if (OPERAND1_INDIRECT (Operands)) {
2028 Index16 = VmReadIndex16 (VmPtr, 2);
2029 } else {
2030 Index16 = VmReadImmed16 (VmPtr, 2);
2031 }
2032
2033 VmPtr->Ip += 4;
2034 } else {
2035 Index16 = 0;
2036 VmPtr->Ip += 2;
2037 }
2038 //
2039 // Read the data off the stack, then adjust the stack pointer
2040 //
2041 DataN = VmReadMemN (VmPtr, (UINTN) VmPtr->R[0]);
2042 VmPtr->R[0] += sizeof (UINTN);
2043 //
2044 // Do the write-back
2045 //
2046 if (OPERAND1_INDIRECT (Operands)) {
2047 VmWriteMemN (VmPtr, (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16), DataN);
2048 } else {
2049 VmPtr->R[OPERAND1_REGNUM (Operands)] = (INT64) (UINT64) ((UINTN) DataN + Index16);
2050 }
2051
2052 return EFI_SUCCESS;
2053 }
2054
2055
2056 /**
2057 Execute the EBC POP instruction
2058
2059 @param VmPtr pointer to a VM context
2060
2061 @return Standard EFI_STATUS
2062 @return Instruction syntax:
2063 @return POP {@}R1 {Index16|Immed16}
2064
2065 **/
2066 STATIC
2067 EFI_STATUS
2068 ExecutePOP (
2069 IN VM_CONTEXT *VmPtr
2070 )
2071 {
2072 UINT8 Opcode;
2073 UINT8 Operands;
2074 INT16 Index16;
2075 INT32 Data32;
2076 UINT64 Data64;
2077
2078 //
2079 // Get opcode and operands
2080 //
2081 Opcode = GETOPCODE (VmPtr);
2082 Operands = GETOPERANDS (VmPtr);
2083 //
2084 // Get immediate data if present, and advance the IP.
2085 //
2086 if (Opcode & PUSHPOP_M_IMMDATA) {
2087 if (OPERAND1_INDIRECT (Operands)) {
2088 Index16 = VmReadIndex16 (VmPtr, 2);
2089 } else {
2090 Index16 = VmReadImmed16 (VmPtr, 2);
2091 }
2092
2093 VmPtr->Ip += 4;
2094 } else {
2095 Index16 = 0;
2096 VmPtr->Ip += 2;
2097 }
2098 //
2099 // Get the data off the stack, then write it to the appropriate location
2100 //
2101 if (Opcode & PUSHPOP_M_64) {
2102 //
2103 // Read the data off the stack, then adjust the stack pointer
2104 //
2105 Data64 = VmReadMem64 (VmPtr, (UINTN) VmPtr->R[0]);
2106 VmPtr->R[0] += sizeof (UINT64);
2107 //
2108 // Do the write-back
2109 //
2110 if (OPERAND1_INDIRECT (Operands)) {
2111 VmWriteMem64 (VmPtr, (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16), Data64);
2112 } else {
2113 VmPtr->R[OPERAND1_REGNUM (Operands)] = Data64 + Index16;
2114 }
2115 } else {
2116 //
2117 // 32-bit pop. Read it off the stack and adjust the stack pointer
2118 //
2119 Data32 = (INT32) VmReadMem32 (VmPtr, (UINTN) VmPtr->R[0]);
2120 VmPtr->R[0] += sizeof (UINT32);
2121 //
2122 // Do the write-back
2123 //
2124 if (OPERAND1_INDIRECT (Operands)) {
2125 VmWriteMem32 (VmPtr, (UINTN) (VmPtr->R[OPERAND1_REGNUM (Operands)] + Index16), Data32);
2126 } else {
2127 VmPtr->R[OPERAND1_REGNUM (Operands)] = (INT64) Data32 + Index16;
2128 }
2129 }
2130
2131 return EFI_SUCCESS;
2132 }
2133
2134
2135 /**
2136 Implements the EBC CALL instruction.
2137 Instruction format:
2138 CALL64 Immed64
2139 CALL32 {@}R1 {Immed32|Index32}
2140 CALLEX64 Immed64
2141 CALLEX16 {@}R1 {Immed32}
2142 If Rx == R0, then it's a PC relative call to PC = PC + imm32.
2143
2144 @param VmPtr pointer to a VM context.
2145
2146 @return Standard EFI_STATUS
2147
2148 **/
2149 STATIC
2150 EFI_STATUS
2151 ExecuteCALL (
2152 IN VM_CONTEXT *VmPtr
2153 )
2154 {
2155 UINT8 Opcode;
2156 UINT8 Operands;
2157 INT32 Immed32;
2158 UINT8 Size;
2159 INT64 Immed64;
2160 VOID *FramePtr;
2161
2162 //
2163 // Get opcode and operands
2164 //
2165 Opcode = GETOPCODE (VmPtr);
2166 Operands = GETOPERANDS (VmPtr);
2167 //
2168 // Assign these as well to avoid compiler warnings
2169 //
2170 Immed64 = 0;
2171 Immed32 = 0;
2172
2173 FramePtr = VmPtr->FramePtr;
2174 //
2175 // Determine the instruction size, and get immediate data if present
2176 //
2177 if (Opcode & OPCODE_M_IMMDATA) {
2178 if (Opcode & OPCODE_M_IMMDATA64) {
2179 Immed64 = VmReadImmed64 (VmPtr, 2);
2180 Size = 10;
2181 } else {
2182 //
2183 // If register operand is indirect, then the immediate data is an index
2184 //
2185 if (OPERAND1_INDIRECT (Operands)) {
2186 Immed32 = VmReadIndex32 (VmPtr, 2);
2187 } else {
2188 Immed32 = VmReadImmed32 (VmPtr, 2);
2189 }
2190
2191 Size = 6;
2192 }
2193 } else {
2194 Size = 2;
2195 }
2196 //
2197 // If it's a call to EBC, adjust the stack pointer down 16 bytes and
2198 // put our return address and frame pointer on the VM stack.
2199 //
2200 if ((Operands & OPERAND_M_NATIVE_CALL) == 0) {
2201 VmPtr->R[0] -= 8;
2202 VmWriteMemN (VmPtr, (UINTN) VmPtr->R[0], (UINTN) FramePtr);
2203 VmPtr->FramePtr = (VOID *) (UINTN) VmPtr->R[0];
2204 VmPtr->R[0] -= 8;
2205 VmWriteMem64 (VmPtr, (UINTN) VmPtr->R[0], (UINT64) (UINTN) (VmPtr->Ip + Size));
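//
// At this point [R0] holds the return address and [R0 + 8] holds the
// caller's frame pointer; ExecuteRET restores them in that order.
//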
2206 }
2207 //
2208 // If 64-bit data, then absolute jump only
2209 //
2210 if (Opcode & OPCODE_M_IMMDATA64) {
2211 //
2212 // Native or EBC call?
2213 //
2214 if ((Operands & OPERAND_M_NATIVE_CALL) == 0) {
2215 VmPtr->Ip = (VMIP) (UINTN) Immed64;
2216 } else {
2217 //
2218 // Call external function, get the return value, and advance the IP
2219 //
2220 EbcLLCALLEX (VmPtr, (UINTN) Immed64, (UINTN) VmPtr->R[0], FramePtr, Size);
2221 }
2222 } else {
2223 //
2224 // Get the register data. If operand1 == 0, then ignore register and
2225 // take immediate data as relative or absolute address.
2226 // Compiler should take care of upper bits if 32-bit machine.
2227 //
2228 if (OPERAND1_REGNUM (Operands) != 0) {
2229 Immed64 = (UINT64) (UINTN) VmPtr->R[OPERAND1_REGNUM (Operands)];
2230 }
2231 //
2232 // Get final address
2233 //
2234 if (OPERAND1_INDIRECT (Operands)) {
2235 Immed64 = (INT64) (UINT64) (UINTN) VmReadMemN (VmPtr, (UINTN) (Immed64 + Immed32));
2236 } else {
2237 Immed64 += Immed32;
2238 }
2239 //
2240 // Now determine if external call, and then if relative or absolute
2241 //
2242 if ((Operands & OPERAND_M_NATIVE_CALL) == 0) {
2243 //
2244 // EBC call. Relative or absolute? If relative, then it's relative to the
2245 // start of the next instruction.
2246 //
2247 if (Operands & OPERAND_M_RELATIVE_ADDR) {
2248 VmPtr->Ip += Immed64 + Size;
2249 } else {
2250 VmPtr->Ip = (VMIP) (UINTN) Immed64;
2251 }
2252 } else {
2253 //
2254 // Native call. Relative or absolute?
2255 //
2256 if (Operands & OPERAND_M_RELATIVE_ADDR) {
2257 EbcLLCALLEX (VmPtr, (UINTN) (Immed64 + VmPtr->Ip + Size), (UINTN) VmPtr->R[0], FramePtr, Size);
2258 } else {
2259 if (VmPtr->StopFlags & STOPFLAG_BREAK_ON_CALLEX) {
2260 CpuBreakpoint ();
2261 }
2262
2263 EbcLLCALLEX (VmPtr, (UINTN) Immed64, (UINTN) VmPtr->R[0], FramePtr, Size);
2264 }
2265 }
2266 }
2267
2268 return EFI_SUCCESS;
2269 }
2270
2271
2272 /**
2273 Execute the EBC RET instruction
2274
2275 @param VmPtr pointer to a VM context
2276
2277 @return Standard EFI_STATUS
2278 @return Instruction syntax:
2279 @return RET
2280
2281 **/
2282 STATIC
2283 EFI_STATUS
2284 ExecuteRET (
2285 IN VM_CONTEXT *VmPtr
2286 )
2287 {
2288 //
2289 // If we're at the top of the stack, then simply set the done
2290 // flag and return
2291 //
2292 if (VmPtr->StackRetAddr == (UINT64) VmPtr->R[0]) {
2293 VmPtr->StopFlags |= STOPFLAG_APP_DONE;
2294 } else {
2295 //
2296 // Pull the return address off the VM app's stack and set the IP
2297 // to it
2298 //
2299 if (!IS_ALIGNED ((UINTN) VmPtr->R[0], sizeof (UINT16))) {
2300 EbcDebugSignalException (
2301 EXCEPT_EBC_ALIGNMENT_CHECK,
2302 EXCEPTION_FLAG_FATAL,
2303 VmPtr
2304 );
2305 }
2306 //
2307 // Restore the IP and frame pointer from the stack
2308 //
2309 VmPtr->Ip = (VMIP) (UINTN) VmReadMem64 (VmPtr, (UINTN) VmPtr->R[0]);
2310 VmPtr->R[0] += 8;
2311 VmPtr->FramePtr = (VOID *) VmReadMemN (VmPtr, (UINTN) VmPtr->R[0]);
2312 VmPtr->R[0] += 8;
2313 }
2314
2315 return EFI_SUCCESS;
2316 }
2317
2318
2319 /**
2320 Execute the EBC CMP instruction
2321
2322 @param VmPtr pointer to a VM context
2323
2324 @return Standard EFI_STATUS
2325 @return Instruction syntax:
2326 @return CMP[32|64][eq|lte|gte|ulte|ugte] R1, {@}R2 {Index16|Immed16}
2327
2328 **/
2329 STATIC
2330 EFI_STATUS
2331 ExecuteCMP (
2332 IN VM_CONTEXT *VmPtr
2333 )
2334 {
2335 UINT8 Opcode;
2336 UINT8 Operands;
2337 UINT8 Size;
2338 INT16 Index16;
2339 UINT32 Flag;
2340 INT64 Op2;
2341 INT64 Op1;
2342
2343 //
2344 // Get opcode and operands
2345 //
2346 Opcode = GETOPCODE (VmPtr);
2347 Operands = GETOPERANDS (VmPtr);
2348 //
2349 // Get the register data we're going to compare to
2350 //
2351 Op1 = VmPtr->R[OPERAND1_REGNUM (Operands)];
2352 //
2353 // Get immediate data
2354 //
2355 if (Opcode & OPCODE_M_IMMDATA) {
2356 if (OPERAND2_INDIRECT (Operands)) {
2357 Index16 = VmReadIndex16 (VmPtr, 2);
2358 } else {
2359 Index16 = VmReadImmed16 (VmPtr, 2);
2360 }
2361
2362 Size = 4;
2363 } else {
2364 Index16 = 0;
2365 Size = 2;
2366 }
2367 //
2368 // Now get Op2
2369 //
2370 if (OPERAND2_INDIRECT (Operands)) {
2371 if (Opcode & OPCODE_M_64BIT) {
2372 Op2 = (INT64) VmReadMem64 (VmPtr, (UINTN) (VmPtr->R[OPERAND2_REGNUM (Operands)] + Index16));
2373 } else {
2374 //
2375 // 32-bit operations. 0-extend the values for all cases.
2376 //
2377 Op2 = (INT64) (UINT64) ((UINT32) VmReadMem32 (VmPtr, (UINTN) (VmPtr->R[OPERAND2_REGNUM (Operands)] + Index16)));
2378 }
2379 } else {
2380 Op2 = VmPtr->R[OPERAND2_REGNUM (Operands)] + Index16;
2381 }
2382 //
2383 // Now do the compare
2384 //
2385 Flag = 0;
2386 if (Opcode & OPCODE_M_64BIT) {
2387 //
2388 // 64-bit compares
2389 //
2390 switch (Opcode & OPCODE_M_OPCODE) {
2391 case OPCODE_CMPEQ:
2392 if (Op1 == Op2) {
2393 Flag = 1;
2394 }
2395 break;
2396
2397 case OPCODE_CMPLTE:
2398 if (Op1 <= Op2) {
2399 Flag = 1;
2400 }
2401 break;
2402
2403 case OPCODE_CMPGTE:
2404 if (Op1 >= Op2) {
2405 Flag = 1;
2406 }
2407 break;
2408
2409 case OPCODE_CMPULTE:
2410 if ((UINT64) Op1 <= (UINT64) Op2) {
2411 Flag = 1;
2412 }
2413 break;
2414
2415 case OPCODE_CMPUGTE:
2416 if ((UINT64) Op1 >= (UINT64) Op2) {
2417 Flag = 1;
2418 }
2419 break;
2420
2421 default:
2422 ASSERT (0);
2423 }
2424 } else {
2425 //
2426 // 32-bit compares
2427 //
2428 switch (Opcode & OPCODE_M_OPCODE) {
2429 case OPCODE_CMPEQ:
2430 if ((INT32) Op1 == (INT32) Op2) {
2431 Flag = 1;
2432 }
2433 break;
2434
2435 case OPCODE_CMPLTE:
2436 if ((INT32) Op1 <= (INT32) Op2) {
2437 Flag = 1;
2438 }
2439 break;
2440
2441 case OPCODE_CMPGTE:
2442 if ((INT32) Op1 >= (INT32) Op2) {
2443 Flag = 1;
2444 }
2445 break;
2446
2447 case OPCODE_CMPULTE:
2448 if ((UINT32) Op1 <= (UINT32) Op2) {
2449 Flag = 1;
2450 }
2451 break;
2452
2453 case OPCODE_CMPUGTE:
2454 if ((UINT32) Op1 >= (UINT32) Op2) {
2455 Flag = 1;
2456 }
2457 break;
2458
2459 default:
2460 ASSERT (0);
2461 }
2462 }
2463 //
2464 // Now set the flag accordingly for the comparison
2465 //
2466 if (Flag) {
2467 VMFLAG_SET (VmPtr, VMFLAGS_CC);
2468 } else {
2469 VMFLAG_CLEAR (VmPtr, VMFLAGS_CC);
2470 }
2471 //
2472 // Advance the IP
2473 //
2474 VmPtr->Ip += Size;
2475 return EFI_SUCCESS;
2476 }
2477
2478
2479 /**
2480 Execute the EBC CMPI instruction
2481
2482 @param VmPtr pointer to a VM context
2483
2484 @return Standard EFI_STATUS
2485 @return Instruction syntax:
2486 @return CMPI[32|64]{w|d}[eq|lte|gte|ulte|ugte] {@}Rx {Index16}, Immed16|Immed32
2487
2488 **/
2489 STATIC
2490 EFI_STATUS
2491 ExecuteCMPI (
2492 IN VM_CONTEXT *VmPtr
2493 )
2494 {
2495 UINT8 Opcode;
2496 UINT8 Operands;
2497 UINT8 Size;
2498 INT64 Op1;
2499 INT64 Op2;
2500 INT16 Index16;
2501 UINT32 Flag;
2502
2503 //
2504 // Get opcode and operands
2505 //
2506 Opcode = GETOPCODE (VmPtr);
2507 Operands = GETOPERANDS (VmPtr);
2508
2509 //
2510 // Get operand1 index if present
2511 //
2512 Size = 2;
2513 if (Operands & OPERAND_M_CMPI_INDEX) {
2514 Index16 = VmReadIndex16 (VmPtr, 2);
2515 Size += 2;
2516 } else {
2517 Index16 = 0;
2518 }
2519 //
2520 // Get operand1 data we're going to compare to
2521 //
2522 Op1 = (INT64) VmPtr->R[OPERAND1_REGNUM (Operands)];
2523 if (OPERAND1_INDIRECT (Operands)) {
2524 //
2525 // Indirect operand1. Fetch 32 or 64-bit value based on compare size.
2526 //
2527 if (Opcode & OPCODE_M_CMPI64) {
2528 Op1 = (INT64) VmReadMem64 (VmPtr, (UINTN) Op1 + Index16);
2529 } else {
2530 Op1 = (INT64) VmReadMem32 (VmPtr, (UINTN) Op1 + Index16);
2531 }
2532 } else {
2533 //
2534 // A direct operand1 must not have an index. That is, CMPI R1 Index,...
2535 // is illegal.
2536 //
2537 if (Operands & OPERAND_M_CMPI_INDEX) {
2538 EbcDebugSignalException (
2539 EXCEPT_EBC_INSTRUCTION_ENCODING,
2540 EXCEPTION_FLAG_ERROR,
2541 VmPtr
2542 );
2543 VmPtr->Ip += Size;
2544 return EFI_UNSUPPORTED;
2545 }
2546 }
2547 //
2548 // Get immediate data -- 16- or 32-bit sign extended
2549 //
2550 if (Opcode & OPCODE_M_CMPI32_DATA) {
2551 Op2 = (INT64) VmReadImmed32 (VmPtr, Size);
2552 Size += 4;
2553 } else {
2554 //
2555 // 16-bit immediate data. Sign extend always.
2556 //
2557 Op2 = (INT64) ((INT16) VmReadImmed16 (VmPtr, Size));
2558 Size += 2;
2559 }
2560 //
2561 // Now do the compare
2562 //
2563 Flag = 0;
2564 if (Opcode & OPCODE_M_CMPI64) {
2565 //
2566 // 64 bit comparison
2567 //
2568 switch (Opcode & OPCODE_M_OPCODE) {
2569 case OPCODE_CMPIEQ:
2570 if (Op1 == (INT64) Op2) {
2571 Flag = 1;
2572 }
2573 break;
2574
2575 case OPCODE_CMPILTE:
2576 if (Op1 <= (INT64) Op2) {
2577 Flag = 1;
2578 }
2579 break;
2580
2581 case OPCODE_CMPIGTE:
2582 if (Op1 >= (INT64) Op2) {
2583 Flag = 1;
2584 }
2585 break;
2586
2587 case OPCODE_CMPIULTE:
2588 if ((UINT64) Op1 <= (UINT64) ((UINT32) Op2)) {
2589 Flag = 1;
2590 }
2591 break;
2592
2593 case OPCODE_CMPIUGTE:
2594 if ((UINT64) Op1 >= (UINT64) ((UINT32) Op2)) {
2595 Flag = 1;
2596 }
2597 break;
2598
2599 default:
2600 ASSERT (0);
2601 }
2602 } else {
2603 //
2604 // 32-bit comparisons
2605 //
2606 switch (Opcode & OPCODE_M_OPCODE) {
2607 case OPCODE_CMPIEQ:
2608 if ((INT32) Op1 == Op2) {
2609 Flag = 1;
2610 }
2611 break;
2612
2613 case OPCODE_CMPILTE:
2614 if ((INT32) Op1 <= Op2) {
2615 Flag = 1;
2616 }
2617 break;
2618
2619 case OPCODE_CMPIGTE:
2620 if ((INT32) Op1 >= Op2) {
2621 Flag = 1;
2622 }
2623 break;
2624
2625 case OPCODE_CMPIULTE:
2626 if ((UINT32) Op1 <= (UINT32) Op2) {
2627 Flag = 1;
2628 }
2629 break;
2630
2631 case OPCODE_CMPIUGTE:
2632 if ((UINT32) Op1 >= (UINT32) Op2) {
2633 Flag = 1;
2634 }
2635 break;
2636
2637 default:
2638 ASSERT (0);
2639 }
2640 }
2641 //
2642 // Now set the flag accordingly for the comparison
2643 //
2644 if (Flag) {
2645 VMFLAG_SET (VmPtr, VMFLAGS_CC);
2646 } else {
2647 VMFLAG_CLEAR (VmPtr, VMFLAGS_CC);
2648 }
2649 //
2650 // Advance the IP
2651 //
2652 VmPtr->Ip += Size;
2653 return EFI_SUCCESS;
2654 }
2655
2656
2657 /**
2658 Execute the EBC NOT instruction
2659
2660 @param VmPtr pointer to a VM context
2661 @param Op1 Operand 1 from the instruction
2662 @param Op2 Operand 2 from the instruction
2663
2664 @return ~Op2
2665 @return Instruction syntax:
2666 @return NOT[32|64] {@}R1, {@}R2 {Index16|Immed16}
2667
2668 **/
2669 STATIC
2670 UINT64
2671 ExecuteNOT (
2672 IN VM_CONTEXT *VmPtr,
2673 IN UINT64 Op1,
2674 IN UINT64 Op2
2675 )
2676 {
2677 return ~Op2;
2678 }
2679
2680
2681 /**
2682 Execute the EBC NEG instruction
2683
2684 @param VmPtr pointer to a VM context
2685 @param Op1 Operand 1 from the instruction
2686 @param Op2 Operand 2 from the instruction
2687
2688 @return Op2 * -1
2689 @return Instruction syntax:
2690 @return NEG[32|64] {@}R1, {@}R2 {Index16|Immed16}
2691
2692 **/
2693 STATIC
2694 UINT64
2695 ExecuteNEG (
2696 IN VM_CONTEXT *VmPtr,
2697 IN UINT64 Op1,
2698 IN UINT64 Op2
2699 )
2700 {
2701 return ~Op2 + 1;
2702 }
2703
2704
2705 /**
2706 Execute the EBC ADD instruction
2707
2708 @param VmPtr pointer to a VM context
2709 @param Op1 Operand 1 from the instruction
2710 @param Op2 Operand 2 from the instruction
2711
2712 @return Op1 + Op2
2713 @return Instruction syntax:
2714 @return ADD[32|64] {@}R1, {@}R2 {Index16}
2715
2716 **/
2717 STATIC
2718 UINT64
2719 ExecuteADD (
2720 IN VM_CONTEXT *VmPtr,
2721 IN UINT64 Op1,
2722 IN UINT64 Op2
2723 )
2724 {
2725 return Op1 + Op2;
2726 }
2727
2728
2729 /**
2730 Execute the EBC SUB instruction
2731
2732 @param VmPtr pointer to a VM context
2733 @param Op1 Operand 1 from the instruction
2734 @param Op2 Operand 2 from the instruction
2735
2736 @return Op1 - Op2
2737 @return Instruction syntax:
2738 @return SUB[32|64] {@}R1, {@}R2 {Index16|Immed16}
2739
2740 **/
2741 STATIC
2742 UINT64
2743 ExecuteSUB (
2744 IN VM_CONTEXT *VmPtr,
2745 IN UINT64 Op1,
2746 IN UINT64 Op2
2747 )
2748 {
2749 if (*VmPtr->Ip & DATAMANIP_M_64) {
2750 return (UINT64) ((INT64) ((INT64) Op1 - (INT64) Op2));
2751 } else {
2752 return (UINT64) ((INT64) ((INT32) Op1 - (INT32) Op2));
2753 }
2754 }
2755
2756
2757 /**
2758 Execute the EBC MUL instruction
2759
2760 @param VmPtr pointer to a VM context
2761 @param Op1 Operand 1 from the instruction
2762 @param Op2 Operand 2 from the instruction
2763
2764 @return Op1 * Op2
2765 @return Instruction syntax:
2766 @return MUL[32|64] {@}R1, {@}R2 {Index16|Immed16}
2767
2768 **/
2769 STATIC
2770 UINT64
2771 ExecuteMUL (
2772 IN VM_CONTEXT *VmPtr,
2773 IN UINT64 Op1,
2774 IN UINT64 Op2
2775 )
2776 {
2777 if (*VmPtr->Ip & DATAMANIP_M_64) {
2778 return MultS64x64 ((INT64)Op1, (INT64)Op2);
2779 } else {
2780 return (UINT64) ((INT64) ((INT32) Op1 * (INT32) Op2));
2781 }
2782 }
2783
2784
2785 /**
2786 Execute the EBC MULU instruction
2787
2788 @param VmPtr pointer to a VM context
2789 @param Op1 Operand 1 from the instruction
2790 @param Op2 Operand 2 from the instruction
2791
2792 @return (unsigned)Op1 * (unsigned)Op2
2793 @return Instruction syntax:
2794 @return MULU[32|64] {@}R1, {@}R2 {Index16|Immed16}
2795
2796 **/
2797 STATIC
2798 UINT64
2799 ExecuteMULU (
2800 IN VM_CONTEXT *VmPtr,
2801 IN UINT64 Op1,
2802 IN UINT64 Op2
2803 )
2804 {
2805 if (*VmPtr->Ip & DATAMANIP_M_64) {
2806 return MultU64x64 (Op1, Op2);
2807 } else {
2808 return (UINT64) ((UINT32) Op1 * (UINT32) Op2);
2809 }
2810 }
2811
2812
2813 /**
2814 Execute the EBC DIV instruction
2815
2816 @param VmPtr pointer to a VM context
2817 @param Op1 Operand 1 from the instruction
2818 @param Op2 Operand 2 from the instruction
2819
2820 @return Op1/Op2
2821 @return Instruction syntax:
2822 @return DIV[32|64] {@}R1, {@}R2 {Index16|Immed16}
2823
2824 **/
2825 STATIC
2826 UINT64
2827 ExecuteDIV (
2828 IN VM_CONTEXT *VmPtr,
2829 IN UINT64 Op1,
2830 IN UINT64 Op2
2831 )
2832 {
2833 INT64 Remainder;
2834
2835 //
2836 // Check for divide-by-0
2837 //
2838 if (Op2 == 0) {
2839 EbcDebugSignalException (
2840 EXCEPT_EBC_DIVIDE_ERROR,
2841 EXCEPTION_FLAG_FATAL,
2842 VmPtr
2843 );
2844
2845 return 0;
2846 } else {
2847 if (*VmPtr->Ip & DATAMANIP_M_64) {
2848 return (UINT64) (DivS64x64Remainder (Op1, Op2, &Remainder));
2849 } else {
2850 return (UINT64) ((INT64) ((INT32) Op1 / (INT32) Op2));
2851 }
2852 }
2853 }
2854
2855
2856 /**
2857 Execute the EBC DIVU instruction
2858
2859 @param VmPtr pointer to a VM context
2860 @param Op1 Operand 1 from the instruction
2861 @param Op2 Operand 2 from the instruction
2862
2863 @return (unsigned)Op1 / (unsigned)Op2
2864 @return Instruction syntax:
2865 @return DIVU[32|64] {@}R1, {@}R2 {Index16|Immed16}
2866
2867 **/
2868 STATIC
2869 UINT64
2870 ExecuteDIVU (
2871 IN VM_CONTEXT *VmPtr,
2872 IN UINT64 Op1,
2873 IN UINT64 Op2
2874 )
2875 {
2876 UINT64 Remainder;
2877
2878 //
2879 // Check for divide-by-0
2880 //
2881 if (Op2 == 0) {
2882 EbcDebugSignalException (
2883 EXCEPT_EBC_DIVIDE_ERROR,
2884 EXCEPTION_FLAG_FATAL,
2885 VmPtr
2886 );
2887 return 0;
2888 } else {
2889 //
2890 // Do the divide based on the operand size
2891 //
2892 if (*VmPtr->Ip & DATAMANIP_M_64) {
2893 return (UINT64) (DivU64x64Remainder (Op1, Op2, &Remainder));
2894 } else {
2895 return (UINT64) ((UINT32) Op1 / (UINT32) Op2);
2896 }
2897 }
2898 }
2899
2900
2901 /**
2902 Execute the EBC MOD instruction
2903
2904 @param VmPtr pointer to a VM context
2905 @param Op1 Operand 1 from the instruction
2906 @param Op2 Operand 2 from the instruction
2907
2908 @return Op1 MODULUS Op2
2909 @return Instruction syntax:
2910 @return MOD[32|64] {@}R1, {@}R2 {Index16|Immed16}
2911
2912 **/
2913 STATIC
2914 UINT64
2915 ExecuteMOD (
2916 IN VM_CONTEXT *VmPtr,
2917 IN UINT64 Op1,
2918 IN UINT64 Op2
2919 )
2920 {
2921 INT64 Remainder;
2922
2923 //
2924 // Check for divide-by-0
2925 //
2926 if (Op2 == 0) {
2927 EbcDebugSignalException (
2928 EXCEPT_EBC_DIVIDE_ERROR,
2929 EXCEPTION_FLAG_FATAL,
2930 VmPtr
2931 );
2932 return 0;
2933 } else {
2934 DivS64x64Remainder ((INT64)Op1, (INT64)Op2, &Remainder);
2935 return Remainder;
2936 }
2937 }
2938
2939
2940 /**
2941 Execute the EBC MODU instruction
2942
2943 @param VmPtr pointer to a VM context
2944 @param Op1 Operand 1 from the instruction
2945 @param Op2 Operand 2 from the instruction
2946
2947 @return Op1 UNSIGNED_MODULUS Op2
2948 @return Instruction syntax:
2949 @return MODU[32|64] {@}R1, {@}R2 {Index16|Immed16}
2950
2951 **/
2952 STATIC
2953 UINT64
2954 ExecuteMODU (
2955 IN VM_CONTEXT *VmPtr,
2956 IN UINT64 Op1,
2957 IN UINT64 Op2
2958 )
2959 {
2960 UINT64 Remainder;
2961
2962 //
2963 // Check for divide-by-0
2964 //
2965 if (Op2 == 0) {
2966 EbcDebugSignalException (
2967 EXCEPT_EBC_DIVIDE_ERROR,
2968 EXCEPTION_FLAG_FATAL,
2969 VmPtr
2970 );
2971 return 0;
2972 } else {
2973 DivU64x64Remainder (Op1, Op2, &Remainder);
2974 return Remainder;
2975 }
2976 }
2977
2978
2979 /**
2980 Execute the EBC AND instruction
2981
2982 @param VmPtr pointer to a VM context
2983 @param Op1 Operand 1 from the instruction
2984 @param Op2 Operand 2 from the instruction
2985
2986 @return Op1 AND Op2
2987 @return Instruction syntax:
2988 @return AND[32|64] {@}R1, {@}R2 {Index16|Immed16}
2989
2990 **/
2991 STATIC
2992 UINT64
2993 ExecuteAND (
2994 IN VM_CONTEXT *VmPtr,
2995 IN UINT64 Op1,
2996 IN UINT64 Op2
2997 )
2998 {
2999 return Op1 & Op2;
3000 }
3001
3002
3003 /**
3004 Execute the EBC OR instruction
3005
3006 @param VmPtr pointer to a VM context
3007 @param Op1 Operand 1 from the instruction
3008 @param Op2 Operand 2 from the instruction
3009
3010 @return Op1 OR Op2
3011 @return Instruction syntax:
3012 @return OR[32|64] {@}R1, {@}R2 {Index16|Immed16}
3013
3014 **/
3015 STATIC
3016 UINT64
3017 ExecuteOR (
3018 IN VM_CONTEXT *VmPtr,
3019 IN UINT64 Op1,
3020 IN UINT64 Op2
3021 )
3022 {
3023 return Op1 | Op2;
3024 }
3025
3026
3027 /**
3028 Execute the EBC XOR instruction
3029
3030 @param VmPtr pointer to a VM context
3031 @param Op1 Operand 1 from the instruction
3032 @param Op2 Operand 2 from the instruction
3033
3034 @return Op1 XOR Op2
3035 @return Instruction syntax:
3036 @return XOR[32|64] {@}R1, {@}R2 {Index16|Immed16}
3037
3038 **/
3039 STATIC
3040 UINT64
3041 ExecuteXOR (
3042 IN VM_CONTEXT *VmPtr,
3043 IN UINT64 Op1,
3044 IN UINT64 Op2
3045 )
3046 {
3047 return Op1 ^ Op2;
3048 }
3049
3050
3051 /**
3052 Execute the EBC SHL shift left instruction
3053
3054 @param VmPtr pointer to a VM context
3055 @param Op1 Operand 1 from the instruction
3056 @param Op2 Operand 2 from the instruction
3057
3058 @return Op1 << Op2
3059 @return Instruction syntax:
3060 @return SHL[32|64] {@}R1, {@}R2 {Index16|Immed16}
3061
3062 **/
3063 STATIC
3064 UINT64
3065 ExecuteSHL (
3066 IN VM_CONTEXT *VmPtr,
3067 IN UINT64 Op1,
3068 IN UINT64 Op2
3069 )
3070 {
3071 if (*VmPtr->Ip & DATAMANIP_M_64) {
3072 return LShiftU64 (Op1, (UINTN)Op2);
3073 } else {
3074 return (UINT64) ((UINT32) ((UINT32) Op1 << (UINT32) Op2));
3075 }
3076 }
3077
3078
3079 /**
3080 Execute the EBC SHR instruction
3081
3082 @param VmPtr pointer to a VM context
3083 @param Op1 Operand 1 from the instruction
3084 @param Op2 Operand 2 from the instruction
3085
3086 @return Op1 >> Op2 (unsigned operands)
3087 @return Instruction syntax:
3088 @return SHR[32|64] {@}R1, {@}R2 {Index16|Immed16}
3089
3090 **/
3091 STATIC
3092 UINT64
3093 ExecuteSHR (
3094 IN VM_CONTEXT *VmPtr,
3095 IN UINT64 Op1,
3096 IN UINT64 Op2
3097 )
3098 {
3099 if (*VmPtr->Ip & DATAMANIP_M_64) {
3100 return RShiftU64 (Op1, (UINTN)Op2);
3101 } else {
3102 return (UINT64) ((UINT32) Op1 >> (UINT32) Op2);
3103 }
3104 }
3105
3106
3107 /**
3108 Execute the EBC ASHR instruction
3109
3110 @param VmPtr pointer to a VM context
3111 @param Op1 Operand 1 from the instruction
3112 @param Op2 Operand 2 from the instruction
3113
3114 @return Op1 >> Op2 (signed)
3115 @return Instruction syntax:
3116 @return ASHR[32|64] {@}R1, {@}R2 {Index16|Immed16}
3117
3118 **/
3119 STATIC
3120 UINT64
3121 ExecuteASHR (
3122 IN VM_CONTEXT *VmPtr,
3123 IN UINT64 Op1,
3124 IN UINT64 Op2
3125 )
3126 {
3127 if (*VmPtr->Ip & DATAMANIP_M_64) {
3128 return ARShiftU64 (Op1, (UINTN)Op2);
3129 } else {
3130 return (UINT64) ((INT64) ((INT32) Op1 >> (UINT32) Op2));
3131 }
3132 }
3133
3134
3135 /**
3136 Execute the EBC EXTNDB instruction to sign-extend a byte value.
3137
3138 @param VmPtr pointer to a VM context
3139 @param Op1 Operand 1 from the instruction
3140 @param Op2 Operand 2 from the instruction
3141
3142 @return (INT64)(INT8)Op2
3143 @return Instruction syntax:
3144 @return EXTNDB[32|64] {@}R1, {@}R2 {Index16|Immed16}
3145
3146 **/
3147 STATIC
3148 UINT64
3149 ExecuteEXTNDB (
3150 IN VM_CONTEXT *VmPtr,
3151 IN UINT64 Op1,
3152 IN UINT64 Op2
3153 )
3154 {
3155 INT8 Data8;
3156 INT64 Data64;
3157 //
3158 // Convert to a byte, then return as a 64-bit signed value to let the
3159 // compiler sign-extend the value
3160 //
3161 Data8 = (INT8) Op2;
3162 Data64 = (INT64) Data8;
3163
3164 return (UINT64) Data64;
3165 }
3166
3167
3168 /**
3169 Execute the EBC EXTNDW instruction to sign-extend a 16-bit value.
3170
3171 @param VmPtr pointer to a VM context
3172 @param Op1 Operand 1 from the instruction
3173 @param Op2 Operand 2 from the instruction
3174
3175 @return (INT64)(INT16)Op2
3176 @return Instruction syntax:
3177 @return EXTNDW[32|64] {@}R1, {@}R2 {Index16|Immed16}
3178
3179 **/
3180 STATIC
3181 UINT64
3182 ExecuteEXTNDW (
3183 IN VM_CONTEXT *VmPtr,
3184 IN UINT64 Op1,
3185 IN UINT64 Op2
3186 )
3187 {
3188 INT16 Data16;
3189 INT64 Data64;
3190 //
3191 // Convert to a word, then return as a 64-bit signed value to let the
3192 // compiler sign-extend the value
3193 //
3194 Data16 = (INT16) Op2;
3195 Data64 = (INT64) Data16;
3196
3197 return (UINT64) Data64;
3198 }
3207
3208 /**
3209 Execute the EBC EXTNDD instruction to sign-extend a 32-bit value.
3210
3211 @param VmPtr pointer to a VM context
3212 @param Op1 Operand 1 from the instruction
3213 @param Op2 Operand 2 from the instruction
3214
3215 @return (INT64)(INT32)Op2
3216 @return Instruction syntax:
3217 @return EXTNDD[32|64] {@}R1, {@}R2 {Index16|Immed16}
3218
3219 **/
3220 STATIC
3221 UINT64
3222 ExecuteEXTNDD (
3223 IN VM_CONTEXT *VmPtr,
3224 IN UINT64 Op1,
3225 IN UINT64 Op2
3226 )
3227 {
3228 INT32 Data32;
3229 INT64 Data64;
3230 //
3231 // Convert to a 32-bit value, then return as a 64-bit signed value to let
3232 // the compiler sign-extend the value
3233 //
3234 Data32 = (INT32) Op2;
3235 Data64 = (INT64) Data32;
3236
3237 return (UINT64) Data64;
3238 }
3239
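/**
  Worker function for the signed EBC data manipulation instructions.
  Simply calls ExecuteDataManip() with the flag set to indicate a signed
  operation, so that 32-bit operands are sign-extended when fetched.

  @param VmPtr pointer to a VM context

  @return Standard EFI_STATUS returned by ExecuteDataManip()

**/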
3240 STATIC
3241 EFI_STATUS
3242 ExecuteSignedDataManip (
3243 IN VM_CONTEXT *VmPtr
3244 )
3245 {
3246 //
3247 // Just call the data manipulation function with a flag indicating this
3248 // is a signed operation.
3249 //
3250 return ExecuteDataManip (VmPtr, TRUE);
3251 }
3252
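/**
  Worker function for the unsigned EBC data manipulation instructions.
  Simply calls ExecuteDataManip() with the flag cleared, so that 32-bit
  operands are zero-extended when fetched.

  @param VmPtr pointer to a VM context

  @return Standard EFI_STATUS returned by ExecuteDataManip()

**/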
3253 STATIC
3254 EFI_STATUS
3255 ExecuteUnsignedDataManip (
3256 IN VM_CONTEXT *VmPtr
3257 )
3258 {
3259 //
3260 // Just call the data manipulation function with a flag indicating this
3261 // is not a signed operation.
3262 //
3263 return ExecuteDataManip (VmPtr, FALSE);
3264 }
3265
3266
3267 /**
3268 Execute all the EBC data manipulation instructions.
3269 Since the EBC data manipulation instructions all have the same basic form,
3270 they can share the code that does the fetch of operands and the write-back
3271 of the result. This function fetches the operands (even when both are not
3272 actually required, as with the NOT instruction), dispatches to the
3273 appropriate subfunction, then writes back the returned result.
3274
3275 @param VmPtr pointer to VM context
3276
3277 @return Standard EBC status
3278 @return Format:
3279 @return INSTRUCTION[32|64] {@}R1, {@}R2 {Immed16|Index16}
3280
3281 **/
3282 STATIC
3283 EFI_STATUS
3284 ExecuteDataManip (
3285 IN VM_CONTEXT *VmPtr,
3286 IN BOOLEAN IsSignedOp
3287 )
3288 {
3289 UINT8 Opcode;
3290 INT16 Index16;
3291 UINT8 Operands;
3292 UINT8 Size;
3293 UINT64 Op1;
3294 UINT64 Op2;
3295
3296 //
3297 // Get opcode and operands
3298 //
3299 Opcode = GETOPCODE (VmPtr);
3300 Operands = GETOPERANDS (VmPtr);
3301
3302 //
3303 // Determine if we have immediate data by the opcode
3304 //
3305 if (Opcode & DATAMANIP_M_IMMDATA) {
3306 //
3307 // Index16 if Ry is indirect, or Immed16 if Ry is direct.
3308 //
3309 if (OPERAND2_INDIRECT (Operands)) {
3310 Index16 = VmReadIndex16 (VmPtr, 2);
3311 } else {
3312 Index16 = VmReadImmed16 (VmPtr, 2);
3313 }
3314
3315 Size = 4;
3316 } else {
3317 Index16 = 0;
3318 Size = 2;
3319 }
3320 //
3321 // Now get operand2 (source). It's of format {@}R2 {Index16|Immed16}
3322 //
3323 Op2 = (UINT64) VmPtr->R[OPERAND2_REGNUM (Operands)] + Index16;
3324 if (OPERAND2_INDIRECT (Operands)) {
3325 //
3326 // Indirect form: @R2 Index16. Fetch as 32- or 64-bit data
3327 //
3328 if (Opcode & DATAMANIP_M_64) {
3329 Op2 = VmReadMem64 (VmPtr, (UINTN) Op2);
3330 } else {
3331 //
3332 // Read as signed value where appropriate.
3333 //
3334 if (IsSignedOp) {
3335 Op2 = (UINT64) (INT64) ((INT32) VmReadMem32 (VmPtr, (UINTN) Op2));
3336 } else {
3337 Op2 = (UINT64) VmReadMem32 (VmPtr, (UINTN) Op2);
3338 }
3339 }
3340 } else {
3341 if ((Opcode & DATAMANIP_M_64) == 0) {
3342 if (IsSignedOp) {
3343 Op2 = (UINT64) (INT64) ((INT32) Op2);
3344 } else {
3345 Op2 = (UINT64) ((UINT32) Op2);
3346 }
3347 }
3348 }
3349 //
3350 // Get operand1 (destination and sometimes also an actual operand)
3351 // of form {@}R1
3352 //
3353 Op1 = VmPtr->R[OPERAND1_REGNUM (Operands)];
3354 if (OPERAND1_INDIRECT (Operands)) {
3355 if (Opcode & DATAMANIP_M_64) {
3356 Op1 = VmReadMem64 (VmPtr, (UINTN) Op1);
3357 } else {
3358 if (IsSignedOp) {
3359 Op1 = (UINT64) (INT64) ((INT32) VmReadMem32 (VmPtr, (UINTN) Op1));
3360 } else {
3361 Op1 = (UINT64) VmReadMem32 (VmPtr, (UINTN) Op1);
3362 }
3363 }
3364 } else {
3365 if ((Opcode & DATAMANIP_M_64) == 0) {
3366 if (IsSignedOp) {
3367 Op1 = (UINT64) (INT64) ((INT32) Op1);
3368 } else {
3369 Op1 = (UINT64) ((UINT32) Op1);
3370 }
3371 }
3372 }
3373 //
3374 // Dispatch to the computation function
3375 //
3376 if (((Opcode & OPCODE_M_OPCODE) - OPCODE_NOT) >=
3377 (sizeof (mDataManipDispatchTable) / sizeof (mDataManipDispatchTable[0]))
3378 ) {
3379 EbcDebugSignalException (
3380 EXCEPT_EBC_INVALID_OPCODE,
3381 EXCEPTION_FLAG_ERROR,
3382 VmPtr
3383 );
3384 //
3385 // Advance and return
3386 //
3387 VmPtr->Ip += Size;
3388 return EFI_UNSUPPORTED;
3389 } else {
3390 Op2 = mDataManipDispatchTable[(Opcode & OPCODE_M_OPCODE) - OPCODE_NOT](VmPtr, Op1, Op2);
3391 }
3392 //
3393 // Write back the result.
3394 //
3395 if (OPERAND1_INDIRECT (Operands)) {
3396 Op1 = VmPtr->R[OPERAND1_REGNUM (Operands)];
3397 if (Opcode & DATAMANIP_M_64) {
3398 VmWriteMem64 (VmPtr, (UINTN) Op1, Op2);
3399 } else {
3400 VmWriteMem32 (VmPtr, (UINTN) Op1, (UINT32) Op2);
3401 }
3402 } else {
3403 //
3404 // Storage back to a register. Write back, clearing upper bits (as per
3405 // the specification) if 32-bit operation.
3406 //
3407 VmPtr->R[OPERAND1_REGNUM (Operands)] = Op2;
3408 if ((Opcode & DATAMANIP_M_64) == 0) {
3409 VmPtr->R[OPERAND1_REGNUM (Operands)] &= 0xFFFFFFFF;
3410 }
3411 }
3412 //
3413 // Advance the instruction pointer
3414 //
3415 VmPtr->Ip += Size;
3416 return EFI_SUCCESS;
3417 }
3418
3419
3420 /**
3421 Execute the EBC LOADSP instruction
3422
3423 @param VmPtr pointer to a VM context
3424
3425 @return Standard EFI_STATUS
3426 @return Instruction syntax:
3427 @return LOADSP SP1, R2
3428
3429 **/
3430 STATIC
3431 EFI_STATUS
3432 ExecuteLOADSP (
3433 IN VM_CONTEXT *VmPtr
3434 )
3435 {
3436 UINT8 Operands;
3437
3438 //
3439 // Get the operands
3440 //
3441 Operands = GETOPERANDS (VmPtr);
3442
3443 //
3444 // Do the operation
3445 //
3446 switch (OPERAND1_REGNUM (Operands)) {
3447 //
3448 // Set flags
3449 //
3450 case 0:
3451 //
3452 // Spec states that this instruction will not modify reserved bits in
3453 // the flags register.
3454 //
3455 VmPtr->Flags = (VmPtr->Flags &~VMFLAGS_ALL_VALID) | (VmPtr->R[OPERAND2_REGNUM (Operands)] & VMFLAGS_ALL_VALID);
3456 break;
3457
3458 default:
3459 EbcDebugSignalException (
3460 EXCEPT_EBC_INSTRUCTION_ENCODING,
3461 EXCEPTION_FLAG_WARNING,
3462 VmPtr
3463 );
3464 VmPtr->Ip += 2;
3465 return EFI_UNSUPPORTED;
3466 }
3467
3468 VmPtr->Ip += 2;
3469 return EFI_SUCCESS;
3470 }
3471
3472
3473 /**
3474 Execute the EBC STORESP instruction
3475
3476 @param VmPtr pointer to a VM context
3477
3478 @return Standard EFI_STATUS
3479 @return Instruction syntax:
3480 @return STORESP Rx, FLAGS|IP
3481
3482 **/
3483 STATIC
3484 EFI_STATUS
3485 ExecuteSTORESP (
3486 IN VM_CONTEXT *VmPtr
3487 )
3488 {
3489 UINT8 Operands;
3490
3491 //
3492 // Get the operands
3493 //
3494 Operands = GETOPERANDS (VmPtr);
3495
3496 //
3497 // Do the operation
3498 //
3499 switch (OPERAND2_REGNUM (Operands)) {
3500 //
3501 // Get flags
3502 //
3503 case 0:
3504 //
3505 // Retrieve the value in the flags register, then clear reserved bits
3506 //
3507 VmPtr->R[OPERAND1_REGNUM (Operands)] = (UINT64) (VmPtr->Flags & VMFLAGS_ALL_VALID);
3508 break;
3509
3510 //
3511 // Get IP -- address of following instruction
3512 //
3513 case 1:
3514 VmPtr->R[OPERAND1_REGNUM (Operands)] = (UINT64) (UINTN) VmPtr->Ip + 2;
3515 break;
3516
3517 default:
3518 EbcDebugSignalException (
3519 EXCEPT_EBC_INSTRUCTION_ENCODING,
3520 EXCEPTION_FLAG_WARNING,
3521 VmPtr
3522 );
3523 VmPtr->Ip += 2;
3524 return EFI_UNSUPPORTED;
3525 break;
3526 }
3527
3528 VmPtr->Ip += 2;
3529 return EFI_SUCCESS;
3530 }
3531
3532
3533 /**
3534 Decode a 16-bit index to determine the offset. Given an index value:
3535 b15 - sign bit
3536 b14:12 - number of bits in this index assigned to natural units (=a)
3537 b11:a - constant units = C
3538 b(a-1):0 - natural units = N
3539 Given this info, the offset can be computed by:
3540 offset = (sign_bit ? -1 : 1) * (C + N * sizeof(UINTN))
3541 Max offset is achieved with index = 0x7FFF giving an offset of
3542 0x27B (32-bit machine) or 0x477 (64-bit machine).
3543 Min offset is achieved with index =
3544
3545 @param VmPtr pointer to VM context
3546 @param CodeOffset offset from IP of the location of the 16-bit index to
3547 decode
3548
3549 @return The decoded offset.
3550
3551 **/
3552 STATIC
3553 INT16
3554 VmReadIndex16 (
3555 IN VM_CONTEXT *VmPtr,
3556 IN UINT32 CodeOffset
3557 )
3558 {
3559 UINT16 Index;
3560 INT16 Offset;
3561 INT16 C;
3562 INT16 N;
3563 INT16 NBits;
3564 INT16 Mask;
3565
3566 //
3567 // First read the index from the code stream
3568 //
3569 Index = VmReadCode16 (VmPtr, CodeOffset);
3570
3571 //
3572 // Get the mask for N. First get the number of bits from the index.
3573 //
3574 NBits = (INT16) ((Index & 0x7000) >> 12);
3575
3576 //
3577 // Scale it for 16-bit indexes
3578 //
3579 NBits *= 2;
3580
3581 //
3582 // Now using the number of bits, create a mask.
3583 //
3584 Mask = (INT16) ((INT16)~0 << NBits);
3585
3586 //
3587 // Now using the mask, extract N from the lower bits of the index.
3588 //
3589 N = (INT16) (Index &~Mask);
3590
3591 //
3592 // Now compute C
3593 //
3594 C = (INT16) (((Index &~0xF000) & Mask) >> NBits);
3595
3596 Offset = (INT16) (N * sizeof (UINTN) + C);
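  //
  // Worked example (illustration only): Index = 0x3085 has a width field of
  // 3, so N occupies 6 bits. That gives N = 0x05 and C = 2, so the offset is
  // 5 * sizeof (UINTN) + 2, i.e. 22 on a 32-bit machine or 42 on a 64-bit one.
  //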
3597
3598 //
3599 // Now set the sign
3600 //
3601 if (Index & 0x8000) {
3602 //
3603 // Do it the hard way to work around a bogus compiler warning
3604 //
3605 // Offset = -1 * Offset;
3606 //
3607 Offset = (INT16) ((INT32) Offset * -1);
3608 }
3609
3610 return Offset;
3611 }
3612
3613
3614 /**
3615 Decode a 32-bit index to determine the offset.
3616
3617 @param VmPtr pointer to VM context
3618 @param CodeOffset offset from IP of the location of the 32-bit index to
3619 decode
3620
3621 @return Converted index per EBC VM specification
3622
3623 **/
3624 STATIC
3625 INT32
3626 VmReadIndex32 (
3627 IN VM_CONTEXT *VmPtr,
3628 IN UINT32 CodeOffset
3629 )
3630 {
3631 UINT32 Index;
3632 INT32 Offset;
3633 INT32 C;
3634 INT32 N;
3635 INT32 NBits;
3636 INT32 Mask;
3637
3638 Index = VmReadImmed32 (VmPtr, CodeOffset);
3639
3640 //
3641 // Get the mask for N. First get the number of bits from the index.
3642 //
3643 NBits = (Index & 0x70000000) >> 28;
3644
3645 //
3646 // Scale it for 32-bit indexes
3647 //
3648 NBits *= 4;
3649
3650 //
3651 // Now using the number of bits, create a mask.
3652 //
3653 Mask = (INT32)~0 << NBits;
3654
3655 //
3656 // Now using the mask, extract N from the lower bits of the index.
3657 //
3658 N = Index &~Mask;
3659
3660 //
3661 // Now compute C
3662 //
3663 C = ((Index &~0xF0000000) & Mask) >> NBits;
3664
3665 Offset = N * sizeof (UINTN) + C;
3666
3667 //
3668 // Now set the sign
3669 //
3670 if (Index & 0x80000000) {
3671 Offset = Offset * -1;
3672 }
3673
3674 return Offset;
3675 }
3676
3677
3678 /**
3679 Decode a 64-bit index to determine the offset.
3680
3681 @param VmPtr pointer to VM context
3682 @param CodeOffset offset from IP of the location of the 64-bit index to
3683 decode
3684
3685 @return Converted index per EBC VM specification
3686
3687 **/
3688 STATIC
3689 INT64
3690 VmReadIndex64 (
3691 IN VM_CONTEXT *VmPtr,
3692 IN UINT32 CodeOffset
3693 )
3694 {
3695 UINT64 Index;
3696 INT64 Offset;
3697 INT64 C;
3698 INT64 N;
3699 INT64 NBits;
3700 INT64 Mask;
3701
3702 Index = VmReadCode64 (VmPtr, CodeOffset);
3703
3704 //
3705 // Get the mask for N. First get the number of bits from the index.
3706 //
3707 NBits = RShiftU64 ((Index & 0x7000000000000000ULL), 60);
3708
3709 //
3710 // Scale it for 64-bit indexes (multiply by 8 by shifting left 3)
3711 //
3712 NBits = LShiftU64 ((UINT64)NBits, 3);
3713
3714 //
3715 // Now using the number of bits, create a mask.
3716 //
3717 Mask = (LShiftU64 ((UINT64)~0, (UINTN)NBits));
3718
3719 //
3720 // Now using the mask, extract N from the lower bits of the index.
3721 //
3722 N = Index &~Mask;
3723
3724 //
3725 // Now compute C
3726 //
3727 C = ARShiftU64 (((Index &~0xF000000000000000ULL) & Mask), (UINTN)NBits);
3728
3729 Offset = MultU64x64 (N, sizeof (UINTN)) + C;
3730
3731 //
3732 // Now set the sign
3733 //
3734 if (Index & 0x8000000000000000ULL) {
3735 Offset = MultS64x64 (Offset, -1);
3736 }
3737
3738 return Offset;
3739 }
3740
3741
3742 /**
3743 The following VmWriteMem? routines are called by the EBC data
3744 movement instructions that write to memory. Since these writes
3745 may be to the stack, which looks like this (high address on top):
3746 [EBC entry point arguments]
3747 [VM stack]
3748 [EBC stack]
3749 we need to detect all attempts to write to the EBC entry point argument
3750 stack area and adjust the address (which will initially point into the
3751 VM stack) to point into the EBC entry point arguments.
3752
3753 @param VmPtr pointer to a VM context
3754 @param Addr address to write to
3755 @param Data value to write to Addr
3756
3757 @return Standard EFI_STATUS
3758
3759 **/
3760 STATIC
3761 EFI_STATUS
3762 VmWriteMem8 (
3763 IN VM_CONTEXT *VmPtr,
3764 IN UINTN Addr,
3765 IN UINT8 Data
3766 )
3767 {
3768 //
3769 // Convert the address if it's in the stack gap
3770 //
3771 Addr = ConvertStackAddr (VmPtr, Addr);
3772 *(UINT8 *) Addr = Data;
3773 return EFI_SUCCESS;
3774 }
3775
3776 STATIC
3777 EFI_STATUS
3778 VmWriteMem16 (
3779 IN VM_CONTEXT *VmPtr,
3780 IN UINTN Addr,
3781 IN UINT16 Data
3782 )
3783 {
3784 EFI_STATUS Status;
3785
3786 //
3787 // Convert the address if it's in the stack gap
3788 //
3789 Addr = ConvertStackAddr (VmPtr, Addr);
3790
3791 //
3792 // Do a simple write if aligned
3793 //
3794 if (IS_ALIGNED (Addr, sizeof (UINT16))) {
3795 *(UINT16 *) Addr = Data;
3796 } else {
3797 //
3798 // Write as two bytes
3799 //
3800 MemoryFence ();
3801 if ((Status = VmWriteMem8 (VmPtr, Addr, (UINT8) Data)) != EFI_SUCCESS) {
3802 return Status;
3803 }
3804
3805 MemoryFence ();
3806 if ((Status = VmWriteMem8 (VmPtr, Addr + 1, (UINT8) (Data >> 8))) != EFI_SUCCESS) {
3807 return Status;
3808 }
3809
3810 MemoryFence ();
3811 }
3812
3813 return EFI_SUCCESS;
3814 }
3815
3816 STATIC
3817 EFI_STATUS
3818 VmWriteMem32 (
3819 IN VM_CONTEXT *VmPtr,
3820 IN UINTN Addr,
3821 IN UINT32 Data
3822 )
3823 {
3824 EFI_STATUS Status;
3825
3826 //
3827 // Convert the address if it's in the stack gap
3828 //
3829 Addr = ConvertStackAddr (VmPtr, Addr);
3830
3831 //
3832 // Do a simple write if aligned
3833 //
3834 if (IS_ALIGNED (Addr, sizeof (UINT32))) {
3835 *(UINT32 *) Addr = Data;
3836 } else {
3837 //
3838 // Write as two words
3839 //
3840 MemoryFence ();
3841 if ((Status = VmWriteMem16 (VmPtr, Addr, (UINT16) Data)) != EFI_SUCCESS) {
3842 return Status;
3843 }
3844
3845 MemoryFence ();
3846 if ((Status = VmWriteMem16 (VmPtr, Addr + sizeof (UINT16), (UINT16) (Data >> 16))) != EFI_SUCCESS) {
3847 return Status;
3848 }
3849
3850 MemoryFence ();
3851 }
3852
3853 return EFI_SUCCESS;
3854 }
3855
3856 EFI_STATUS
3857 VmWriteMem64 (
3858 IN VM_CONTEXT *VmPtr,
3859 IN UINTN Addr,
3860 IN UINT64 Data
3861 )
3862 {
3863 EFI_STATUS Status;
3864 UINT32 Data32;
3865
3866 //
3867 // Convert the address if it's in the stack gap
3868 //
3869 Addr = ConvertStackAddr (VmPtr, Addr);
3870
3871 //
3872 // Do a simple write if aligned
3873 //
3874 if (IS_ALIGNED (Addr, sizeof (UINT64))) {
3875 *(UINT64 *) Addr = Data;
3876 } else {
3877 //
3878 // Write as two 32-bit words
3879 //
3880 MemoryFence ();
3881 if ((Status = VmWriteMem32 (VmPtr, Addr, (UINT32) Data)) != EFI_SUCCESS) {
3882 return Status;
3883 }
3884
3885 MemoryFence ();
3886 Data32 = (UINT32) (((UINT32 *) &Data)[1]);
3887 if ((Status = VmWriteMem32 (VmPtr, Addr + sizeof (UINT32), Data32)) != EFI_SUCCESS) {
3888 return Status;
3889 }
3890
3891 MemoryFence ();
3892 }
3893
3894 return EFI_SUCCESS;
3895 }
3896
3897 EFI_STATUS
3898 VmWriteMemN (
3899 IN VM_CONTEXT *VmPtr,
3900 IN UINTN Addr,
3901 IN UINTN Data
3902 )
3903 {
3904 EFI_STATUS Status;
3905 UINTN Index;
3906
3907 Status = EFI_SUCCESS;
3908
3909 //
3910 // Convert the address if it's in the stack gap
3911 //
3912 Addr = ConvertStackAddr (VmPtr, Addr);
3913
3914 //
3915 // Do a simple write if aligned
3916 //
3917 if (IS_ALIGNED (Addr, sizeof (UINTN))) {
3918 *(UINTN *) Addr = Data;
3919 } else {
3920 for (Index = 0; Index < sizeof (UINTN) / sizeof (UINT32); Index++) {
3921 MemoryFence ();
3922 Status = VmWriteMem32 (VmPtr, Addr + Index * sizeof (UINT32), (UINT32) Data);
3923 MemoryFence ();
3924 Data = (UINTN)RShiftU64 ((UINT64)Data, 32);
3925 }
3926 }
3927
3928 return Status;
3929 }
3930
3931
3932 /**
3933 The following VmReadImmed routines are called by the EBC execute
3934 functions to read EBC immediate values from the code stream.
3935 Since we can't assume alignment, each tries to read in the largest
3936 chunk size available, but will revert to smaller reads if necessary.
3937
3938 @param VmPtr pointer to a VM context
3939 @param Offset offset from IP of the code bytes to read.
3940
3941 @return Signed data of the requested size from the specified address.
3942
3943 **/
3944 STATIC
3945 INT8
3946 VmReadImmed8 (
3947 IN VM_CONTEXT *VmPtr,
3948 IN UINT32 Offset
3949 )
3950 {
3951 //
3952 // Simply return the data in flat memory space
3953 //
3954 return * (INT8 *) (VmPtr->Ip + Offset);
3955 }
3956
3957 STATIC
3958 INT16
3959 VmReadImmed16 (
3960 IN VM_CONTEXT *VmPtr,
3961 IN UINT32 Offset
3962 )
3963 {
3964 //
3965 // Read direct if aligned
3966 //
3967 if (IS_ALIGNED ((UINTN) VmPtr->Ip + Offset, sizeof (INT16))) {
3968 return * (INT16 *) (VmPtr->Ip + Offset);
3969 } else {
3970 //
3971 // All code word reads should be aligned
3972 //
3973 EbcDebugSignalException (
3974 EXCEPT_EBC_ALIGNMENT_CHECK,
3975 EXCEPTION_FLAG_WARNING,
3976 VmPtr
3977 );
3978 }
3979 //
3980 // Return unaligned data
3981 //
3982 return (INT16) (*(UINT8 *) (VmPtr->Ip + Offset) + (*(UINT8 *) (VmPtr->Ip + Offset + 1) << 8));
3983 }
3984
3985 STATIC
3986 INT32
3987 VmReadImmed32 (
3988 IN VM_CONTEXT *VmPtr,
3989 IN UINT32 Offset
3990 )
3991 {
3992 UINT32 Data;
3993
3994 //
3995 // Read direct if aligned
3996 //
3997 if (IS_ALIGNED ((UINTN) VmPtr->Ip + Offset, sizeof (UINT32))) {
3998 return * (INT32 *) (VmPtr->Ip + Offset);
3999 }
4000 //
4001 // Return unaligned data
4002 //
4003 Data = (UINT32) VmReadCode16 (VmPtr, Offset);
4004 Data |= (UINT32) (VmReadCode16 (VmPtr, Offset + 2) << 16);
4005 return Data;
4006 }
4007
4008 STATIC
4009 INT64
4010 VmReadImmed64 (
4011 IN VM_CONTEXT *VmPtr,
4012 IN UINT32 Offset
4013 )
4014 {
4015 UINT64 Data64;
4016 UINT32 Data32;
4017 UINT8 *Ptr;
4018
4019 //
4020 // Read direct if aligned
4021 //
4022 if (IS_ALIGNED ((UINTN) VmPtr->Ip + Offset, sizeof (UINT64))) {
4023 return * (UINT64 *) (VmPtr->Ip + Offset);
4024 }
4025 //
4026 // Return unaligned data.
4027 //
4028 Ptr = (UINT8 *) &Data64;
4029 Data32 = VmReadCode32 (VmPtr, Offset);
4030 *(UINT32 *) Ptr = Data32;
4031 Ptr += sizeof (Data32);
4032 Data32 = VmReadCode32 (VmPtr, Offset + sizeof (UINT32));
4033 *(UINT32 *) Ptr = Data32;
4034 return Data64;
4035 }
4036
4037
4038 /**
4039 The following VmReadCode() routines provide the ability to read raw
4040 unsigned data from the code stream.
4041
4042 @param VmPtr pointer to VM context
4043 @param Offset offset from current IP to the raw data to read.
4044
4045 @return The raw unsigned 16-bit value from the code stream.
4046
4047 **/
4048 STATIC
4049 UINT16
4050 VmReadCode16 (
4051 IN VM_CONTEXT *VmPtr,
4052 IN UINT32 Offset
4053 )
4054 {
4055 //
4056 // Read direct if aligned
4057 //
4058 if (IS_ALIGNED ((UINTN) VmPtr->Ip + Offset, sizeof (UINT16))) {
4059 return * (UINT16 *) (VmPtr->Ip + Offset);
4060 } else {
4061 //
4062 // All code word reads should be aligned
4063 //
4064 EbcDebugSignalException (
4065 EXCEPT_EBC_ALIGNMENT_CHECK,
4066 EXCEPTION_FLAG_WARNING,
4067 VmPtr
4068 );
4069 }
4070 //
4071 // Return unaligned data
4072 //
4073 return (UINT16) (*(UINT8 *) (VmPtr->Ip + Offset) + (*(UINT8 *) (VmPtr->Ip + Offset + 1) << 8));
4074 }
4075
4076 STATIC
4077 UINT32
4078 VmReadCode32 (
4079 IN VM_CONTEXT *VmPtr,
4080 IN UINT32 Offset
4081 )
4082 {
4083 UINT32 Data;
4084 //
4085 // Read direct if aligned
4086 //
4087 if (IS_ALIGNED ((UINTN) VmPtr->Ip + Offset, sizeof (UINT32))) {
4088 return * (UINT32 *) (VmPtr->Ip + Offset);
4089 }
4090 //
4091 // Return unaligned data
4092 //
4093 Data = (UINT32) VmReadCode16 (VmPtr, Offset);
4094 Data |= (VmReadCode16 (VmPtr, Offset + 2) << 16);
4095 return Data;
4096 }
4097
4098 STATIC
4099 UINT64
4100 VmReadCode64 (
4101 IN VM_CONTEXT *VmPtr,
4102 IN UINT32 Offset
4103 )
4104 {
4105 UINT64 Data64;
4106 UINT32 Data32;
4107 UINT8 *Ptr;
4108
4109 //
4110 // Read direct if aligned
4111 //
4112 if (IS_ALIGNED ((UINTN) VmPtr->Ip + Offset, sizeof (UINT64))) {
4113 return * (UINT64 *) (VmPtr->Ip + Offset);
4114 }
4115 //
4116 // Return unaligned data.
4117 //
4118 Ptr = (UINT8 *) &Data64;
4119 Data32 = VmReadCode32 (VmPtr, Offset);
4120 *(UINT32 *) Ptr = Data32;
4121 Ptr += sizeof (Data32);
4122 Data32 = VmReadCode32 (VmPtr, Offset + sizeof (UINT32));
4123 *(UINT32 *) Ptr = Data32;
4124 return Data64;
4125 }
4126
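/**
  The following VmReadMem? routines are the read counterparts of the
  VmWriteMem? routines above. Each converts the address if it falls in the
  stack gap, then reads the data directly when the address is suitably
  aligned, or assembles it from smaller aligned reads otherwise.

  @param VmPtr pointer to a VM context
  @param Addr memory address to read from

  @return The unsigned data of the requested size read from Addr.

**/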
4127 STATIC
4128 UINT8
4129 VmReadMem8 (
4130 IN VM_CONTEXT *VmPtr,
4131 IN UINTN Addr
4132 )
4133 {
4134 //
4135 // Convert the address if it's in the stack gap
4136 //
4137 Addr = ConvertStackAddr (VmPtr, Addr);
4138 //
4139 // Simply return the data in flat memory space
4140 //
4141 return * (UINT8 *) Addr;
4142 }
4143
4144 STATIC
4145 UINT16
4146 VmReadMem16 (
4147 IN VM_CONTEXT *VmPtr,
4148 IN UINTN Addr
4149 )
4150 {
4151 //
4152 // Convert the address if it's in the stack gap
4153 //
4154 Addr = ConvertStackAddr (VmPtr, Addr);
4155 //
4156 // Read direct if aligned
4157 //
4158 if (IS_ALIGNED (Addr, sizeof (UINT16))) {
4159 return * (UINT16 *) Addr;
4160 }
4161 //
4162 // Return unaligned data
4163 //
4164 return (UINT16) (*(UINT8 *) Addr + (*(UINT8 *) (Addr + 1) << 8));
4165 }
4166
4167 STATIC
4168 UINT32
4169 VmReadMem32 (
4170 IN VM_CONTEXT *VmPtr,
4171 IN UINTN Addr
4172 )
4173 {
4174 UINT32 Data;
4175
4176 //
4177 // Convert the address if it's in the stack gap
4178 //
4179 Addr = ConvertStackAddr (VmPtr, Addr);
4180 //
4181 // Read direct if aligned
4182 //
4183 if (IS_ALIGNED (Addr, sizeof (UINT32))) {
4184 return * (UINT32 *) Addr;
4185 }
4186 //
4187 // Return unaligned data
4188 //
4189 Data = (UINT32) VmReadMem16 (VmPtr, Addr);
4190 Data |= (VmReadMem16 (VmPtr, Addr + 2) << 16);
4191 return Data;
4192 }
4193
4194 STATIC
4195 UINT64
4196 VmReadMem64 (
4197 IN VM_CONTEXT *VmPtr,
4198 IN UINTN Addr
4199 )
4200 {
4201 UINT64 Data;
4202 UINT32 Data32;
4203
4204 //
4205 // Convert the address if it's in the stack gap
4206 //
4207 Addr = ConvertStackAddr (VmPtr, Addr);
4208
4209 //
4210 // Read direct if aligned
4211 //
4212 if (IS_ALIGNED (Addr, sizeof (UINT64))) {
4213 return * (UINT64 *) Addr;
4214 }
4215 //
4216 // Return unaligned data. Assume little endian.
4217 //
4218 Data = (UINT64) VmReadMem32 (VmPtr, Addr);
4219 Data32 = VmReadMem32 (VmPtr, Addr + sizeof (UINT32));
4220 *(UINT32 *) ((UINT32 *) &Data + 1) = Data32;
4221 return Data;
4222 }
4223
4224
4225 /**
4226 Given an address that EBC is going to read from or write to, return
4227 an appropriate address that accounts for a gap in the stack.
4228 The stack for this application looks like this (high addr on top)
4229 [EBC entry point arguments]
4230 [VM stack]
4231 [EBC stack]
4232 The EBC assumes that its arguments are at the top of its stack, but
4233 that is actually where the VM stack sits. Therefore if the EBC does memory
4234 accesses into the VM stack area, then we need to convert the address
4235 to point to the EBC entry point arguments area. Do this here.
4236
4237 @param VmPtr pointer to VM context
4238 @param Addr address of interest
4239
4240 @return The unchanged address if it's not in the VM stack region. Otherwise,
4241 @return adjust for the stack gap and return the modified address.
4242
4243 **/
4244 STATIC
4245 UINTN
4246 ConvertStackAddr (
4247 IN VM_CONTEXT *VmPtr,
4248 IN UINTN Addr
4249 )
4250 {
4251 ASSERT(((Addr < VmPtr->LowStackTop) || (Addr > VmPtr->HighStackBottom)));
4252 return Addr;
4253 }
4254
4255
4256 /**
4257 Read a natural value from memory. May or may not be aligned.
4258
4259 @param VmPtr current VM context
4260 @param Addr the address to read from
4261
4262 @return The natural value at address Addr.
4263
4264 **/
4265 STATIC
4266 UINTN
4267 VmReadMemN (
4268 IN VM_CONTEXT *VmPtr,
4269 IN UINTN Addr
4270 )
4271 {
4272 UINTN Data;
4273 volatile UINT32 Size;
4274 UINT8 *FromPtr;
4275 UINT8 *ToPtr;
4276 //
4277 // Convert the address if it's in the stack gap
4278 //
4279 Addr = ConvertStackAddr (VmPtr, Addr);
4280 //
4281 // Read direct if aligned
4282 //
4283 if (IS_ALIGNED (Addr, sizeof (UINTN))) {
4284 return * (UINTN *) Addr;
4285 }
4286 //
4287 // Return unaligned data
4288 //
4289 Data = 0;
4290 FromPtr = (UINT8 *) Addr;
4291 ToPtr = (UINT8 *) &Data;
4292
4293 for (Size = 0; Size < sizeof (Data); Size++) {
4294 *ToPtr = *FromPtr;
4295 ToPtr++;
4296 FromPtr++;
4297 }
4298
4299 return Data;
4300 }
4301
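/**
  Return the EBC VM version, built from VM_MAJOR_VERSION and
  VM_MINOR_VERSION, with the major version packed into bits 31:16 and the
  minor version into bits 15:0 of the returned value.

  @return The VM version value.

**/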
4302 UINT64
4303 GetVmVersion (
4304 VOID
4305 )
4306 {
4307 return (UINT64) (((VM_MAJOR_VERSION & 0xFFFF) << 16) | ((VM_MINOR_VERSION & 0xFFFF)));
4308 }