#------------------------------------------------------------------------------
# VOID EFIAPI SetCodeSelector (UINT16 Selector)
# ABI:   Microsoft x64 (EFIAPI) — Selector arrives in %cx.
# Loads CS with the given selector via a far jump through a 6-byte
# (offset32:selector16) far pointer built on the stack.
# Clobbers: %rax, flags. Stack is restored before return.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetCodeSelector)
ASM_PFX(SetCodeSelector):
    subq    $0x10, %rsp                     # scratch space for the far pointer
    leaq    L_setCodeSelectorLongJump(%rip), %rax
    movq    %rax, (%rsp)                    # far pointer offset (low 32 bits used)
    movw    %cx, 4(%rsp)                    # far pointer selector = new CS
    .byte   0xFF, 0x2C, 0x24                # jmp (%rsp) note:fword jmp — far jmp m16:32,
                                            # emitted as raw bytes; reloads CS
L_setCodeSelectorLongJump:                  # now executing with the new CS
    addq    $0x10, %rsp
    ret
\r
# NOTE(review): fragment of an interrupt/exception context save/restore whose
# entry label and %rbp frame setup are outside this view — presumably %rbp
# points at the CPU-pushed interrupt frame (32(%rbp)=CS, 56(%rbp)=SS);
# confirm against the enclosing handler.
# Segment registers are moved via 32-bit GPR forms (movl %sreg,%r32 /
# movl %r32,%sreg), the encoding portable across GAS and Xcode's assembler.
    pushq   %rax                            # for ss
    movzwq  32(%rbp), %rax                  # zero-extend saved CS selector
    pushq   %rax                            # for cs
    movl    %ds, %eax
    pushq   %rax
    movl    %es, %eax
    pushq   %rax
    movl    %fs, %eax
    pushq   %rax
    movl    %gs, %eax
    pushq   %rax

    movq    %rcx, 8(%rbp)                   # save vector number
# mov %rax, %fs ; not for fs
# (X64 will not use fs and gs, so we do not restore it)
    popq    %rax
    movl    %eax, %es
    popq    %rax
    movl    %eax, %ds
    popq    32(%rbp)                        # for cs
    popq    56(%rbp)                        # for ss
\r