//****************************************************************************\r
// UINTN EbcLLCALLEXNative(UINTN FuncAddr, UINTN NewStackPointer, VOID *FramePtr)\r
ASM_PFX(EbcLLCALLEXNative):\r
- stp x19, x20, [sp, #-16]!\r
- stp x29, x30, [sp, #-16]!\r
+ mov x8, x0 // x8 = FuncAddr; x0 is reloaded with native arg #1 below\r
+ mov x9, x1 // x9 = NewStackPointer (lowest EBC argument slot)\r
\r
- mov x19, x0\r
- mov x20, sp\r
- sub x2, x2, x1 // Length = NewStackPointer-FramePtr\r
- sub sp, sp, x2\r
- sub sp, sp, #64 // Make sure there is room for at least 8 args in the new stack\r
- mov x0, sp\r
-\r
- bl CopyMem // Sp, NewStackPointer, Length\r
-\r
- ldp x0, x1, [sp], #16\r
- ldp x2, x3, [sp], #16\r
- ldp x4, x5, [sp], #16\r
- ldp x6, x7, [sp], #16\r
+ //\r
+ // If the EBC stack frame is smaller than or equal to 64 bytes, we know there\r
+ // are no stacked arguments #9 and beyond that we need to copy to the native\r
+ // stack. In this case, we can perform a tail call which is much more\r
+ // efficient, since there is no need to touch the native stack at all.\r
+ //\r
+ sub x3, x2, x1 // Length = FramePtr - NewStackPointer (VM stack grows down)\r
+ cmp x3, #64 // More than 8 register-sized (8-byte) args?\r
+ b.gt 1f // Yes: build a real native stack frame below\r
\r
- blr x19\r
+ //\r
+ // While probably harmless in practice, we should not access the VM stack\r
+ // outside of the interval [NewStackPointer, FramePtr), which means we\r
+ // should not blindly fill all 8 argument registers with VM stack data.\r
+ // So instead, calculate how many argument registers we can fill based on\r
+ // the size of the VM stack frame, and skip the remaining ones.\r
+ //\r
+ adr x0, 0f // Take address of 'br' instruction below\r
+ bic x3, x3, #7 // Round length down to a multiple of 8\r
+ sub x0, x0, x3, lsr #1 // Back up one 4-byte ldr per 8-byte arg\r
+ br x0 // Skip remaining argument registers\r
+\r
+ ldr x7, [x9, #56] // Call with 8 arguments\r
+ ldr x6, [x9, #48] // |\r
+ ldr x5, [x9, #40] // |\r
+ ldr x4, [x9, #32] // |\r
+ ldr x3, [x9, #24] // |\r
+ ldr x2, [x9, #16] // |\r
+ ldr x1, [x9, #8] // V\r
+ ldr x0, [x9] // Call with 1 argument\r
+\r
+0: br x8 // Tail call (0 args if branched to directly); callee returns via our caller's x30\r
\r
- mov sp, x20\r
- ldp x29, x30, [sp], #16\r
- ldp x19, x20, [sp], #16\r
+ //\r
+ // More than 64 bytes: we need to build the full native stack frame and copy\r
+ // the part of the VM stack exceeding 64 bytes (which may contain stacked\r
+ // arguments) to the native stack\r
+ //\r
+1: stp x29, x30, [sp, #-16]!\r
+ mov x29, sp\r
\r
- ret\r
+ //\r
+ // Ensure that the stack pointer remains 16 byte aligned,\r
+ // even if the size of the VM stack frame is not a multiple of 16\r
+ //\r
+ add x1, x1, #64 // Skip over [potential] reg params\r
+ tbz x3, #3, 2f // Multiple of 16?\r
+ ldr x4, [x2, #-8]! // No? Then push one word\r
+ str x4, [sp, #-16]! // ... but use two slots\r
+ b 3f\r
+\r
+2: ldp x4, x5, [x2, #-16]!\r
+ stp x4, x5, [sp, #-16]!\r
+3: cmp x2, x1 // More stacked args above NewStackPointer + 64?\r
+ b.gt 2b // Copy top-down until x2 reaches that boundary\r
+\r
+ ldp x0, x1, [x9] // Load native register args 1..8 from the EBC stack\r
+ ldp x2, x3, [x9, #16] // (any excess loads read inside [x9, x9 + 64) only)\r
+ ldp x4, x5, [x9, #32]\r
+ ldp x6, x7, [x9, #48]\r
+\r
+ blr x8 // Invoke the native function\r
+\r
+ mov sp, x29 // Unwind the copied-argument area\r
+ ldp x29, x30, [sp], #16\r
+ ret\r
\r
//****************************************************************************\r
// EbcLLEbcInterpret\r