*/
#define ENTRY(x) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x; \
x:
#define ENTRY_ALIGN(x, a) \
.text; \
- .align a; \
+ .balign a; \
.globl x; \
x:
*/
#define ENTRY2(x, y) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x, y; \
x:; \
y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x, y; \
x:; \
y:
#undef ENTRY
#define ENTRY(x) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x:
#define ENTRY_ALIGN(x, a) \
.text; \
- .align a; \
+ .balign a; \
.globl x; \
.type x, @function; \
x:
*/
#define ENTRY2(x, y) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
#define ENTRY_NP2(x, y) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
*/
#define ENTRY(x) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x; \
x:
#define ENTRY_ALIGN(x, a) \
.text; \
- .align a; \
+ .balign a; \
.globl x; \
x:
*/
#define ENTRY2(x, y) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x, y; \
x:; \
y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x, y; \
x:; \
y:
#undef ENTRY
#define ENTRY(x) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x:
#define ENTRY_ALIGN(x, a) \
.text; \
- .align a; \
+ .balign a; \
.globl x; \
.type x, @function; \
x:
*/
#define ENTRY2(x, y) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
#define ENTRY_NP2(x, y) \
.text; \
- .align ASM_ENTRY_ALIGN; \
+ .balign ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
FRAME_END
RET
-.align 4
+.balign 4
.Lenc_key192:
cmp $192, %KEYSIZE32
jnz .Lenc_key128
FRAME_END
RET
-.align 4
+.balign 4
.Lenc_key128:
cmp $128, %KEYSIZE32
jnz .Lenc_key_invalid_key_bits
add %AESKEY, %ROUNDS64
mov %ROUNDS64, %ENDAESKEY
-.align 4
+.balign 4
.Ldec_key_reorder_loop:
movups (%AESKEY), %xmm0
movups (%ROUNDS64), %xmm1
cmp %AESKEY, %ROUNDS64
ja .Ldec_key_reorder_loop
-.align 4
+.balign 4
.Ldec_key_inv_loop:
movups (%rcx), %xmm0
// Convert an encryption round key to a form usable for decryption
movups -0x50(%KEYP), %KEY
aesenc %KEY, %STATE
-.align 4
+.balign 4
.Lenc192:
// AES 192 and 256
movups -0x40(%KEYP), %KEY
movups -0x30(%KEYP), %KEY
aesenc %KEY, %STATE
-.align 4
+.balign 4
.Lenc128:
// AES 128, 192, and 256
movups -0x20(%KEYP), %KEY
movups -0x50(%KEYP), %KEY
aesdec %KEY, %STATE
-.align 4
+.balign 4
.Ldec192:
// AES 192 and 256
movups -0x40(%KEYP), %KEY
movups -0x30(%KEYP), %KEY
aesdec %KEY, %STATE
-.align 4
+.balign 4
.Ldec128:
// AES 128, 192, and 256
movups -0x20(%KEYP), %KEY
* unsigned char *out, const aes_encrypt_ctx cx[1])/
*/
SECTION_STATIC
-.align 64
+.balign 64
enc_tab:
enc_vals(u8)
#ifdef LAST_ROUND_TABLES
* unsigned char *out, const aes_encrypt_ctx cx[1])/
*/
SECTION_STATIC
-.align 64
+.balign 64
dec_tab:
dec_vals(v8)
#ifdef LAST_ROUND_TABLES
.text
#ifdef HAVE_MOVBE
-.align 32
+.balign 32
FUNCTION(_aesni_ctr32_ghash_6x)
.cfi_startproc
ENDBR
vmovdqu %xmm4,16+8(%rsp)
jmp .Loop6x
-.align 32
+.balign 32
.Loop6x:
addl $100663296,%ebx
jc .Lhandle_ctr32
vmovups 224-128(%rcx),%xmm1
jmp .Lenc_tail
-.align 32
+.balign 32
.Lhandle_ctr32:
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
vpshufb %xmm0,%xmm1,%xmm1
jmp .Lresume_ctr32
-.align 32
+.balign 32
.Lenc_tail:
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
SET_SIZE(_aesni_ctr32_ghash_6x)
#endif /* ifdef HAVE_MOVBE */
-.align 32
+.balign 32
FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
.cfi_startproc
ENDBR
vmovdqu %xmm4,16+8(%rsp)
jmp .Loop6x_nmb
-.align 32
+.balign 32
.Loop6x_nmb:
addl $100663296,%ebx
jc .Lhandle_ctr32_nmb
vmovups 224-128(%rcx),%xmm1
jmp .Lenc_tail_nmb
-.align 32
+.balign 32
.Lhandle_ctr32_nmb:
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
vpshufb %xmm0,%xmm1,%xmm1
jmp .Lresume_ctr32_nmb
-.align 32
+.balign 32
.Lenc_tail_nmb:
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
.cfi_endproc
SET_SIZE(aesni_gcm_decrypt)
-.align 32
+.balign 32
FUNCTION(_aesni_ctr32_6x)
.cfi_startproc
ENDBR
vpxor %xmm4,%xmm14,%xmm14
jmp .Loop_ctr32
-.align 16
+.balign 16
.Loop_ctr32:
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
leaq 96(%rsi),%rsi
RET
-.align 32
+.balign 32
.Lhandle_ctr32_2:
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
SECTION_STATIC
-.align 64
+.balign 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
.Lone_lsb:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align 64
+.balign 64
/* Mark the stack non-executable. */
#if defined(__linux__) && defined(__ELF__)
// static uint8_t byte_swap16_mask[] = {
// 15, 14, 13, 12, 11, 10, 9, 8, 7, 6 ,5, 4, 3, 2, 1, 0 };
.section .rodata
-.align XMM_ALIGN
+.balign XMM_ALIGN
.Lbyte_swap16_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
vpxor %xmm2,%xmm6,%xmm6
movq $4,%r10
jmp .Linit_start_avx
-.align 32
+.balign 32
.Linit_loop_avx:
vpalignr $8,%xmm3,%xmm4,%xmm5
vmovdqu %xmm5,-16(%rdi)
subq $0x80,%rcx
jmp .Loop8x_avx
-.align 32
+.balign 32
.Loop8x_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 112(%rdx),%xmm14
addq $0x80,%rcx
jmp .Ltail_no_xor_avx
-.align 32
+.balign 32
.Lshort_avx:
vmovdqu -16(%rdx,%rcx,1),%xmm14
leaq (%rdx,%rcx,1),%rdx
subq $0x10,%rcx
jmp .Ltail_avx
-.align 32
+.balign 32
.Ltail_avx:
vpxor %xmm10,%xmm15,%xmm15
.Ltail_no_xor_avx:
#endif /* !_WIN32 || _KERNEL */
SECTION_STATIC
-.align 64
+.balign 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
.long 7,0,7,0
.L7_mask_poly:
.long 7,0,450,0
-.align 64
+.balign 64
SET_OBJ(.Lrem_4bit)
.Lrem_4bit:
.long 0,0,0,471859200,0,943718400,0,610271232
.value 0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align 64
+.balign 64
/* Mark the stack non-executable. */
#if defined(__linux__) && defined(__ELF__)
mov 4*7(%rdi),%r11d
jmp .Lloop
-.align 16
+.balign 16
.Lloop:
xor %rdi,%rdi
mov 4*0(%rsi),%r12d
add %r14d,%eax # h+=Maj(a,b,c)
jmp .Lrounds_16_xx
-.align 16
+.balign 16
.Lrounds_16_xx:
mov 4(%rsp),%r13d
mov 56(%rsp),%r12d
SET_SIZE(SHA256TransformBlocks)
.section .rodata
-.align 64
+.balign 64
SET_OBJ(K256)
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
mov 8*7(%rdi),%r11
jmp .Lloop
-.align 16
+.balign 16
.Lloop:
xor %rdi,%rdi
mov 8*0(%rsi),%r12
add %r14,%rax # h+=Maj(a,b,c)
jmp .Lrounds_16_xx
-.align 16
+.balign 16
.Lrounds_16_xx:
mov 8(%rsp),%r13
mov 112(%rsp),%r12
SET_SIZE(SHA512TransformBlocks)
.section .rodata
-.align 64
+.balign 64
SET_OBJ(K512)
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
#define ENTRY(sym) \
.text; \
.globl sym; \
- .align 2; \
+ .balign 4; \
.type sym,#function; \
sym:
#define ENTRY(x) \
.text; \
.syntax unified; \
- .align 2; \
+ .balign 4; \
.global x; \
.type x,#function; \
_FUNC_MODE; \
#define ENTRY(x) \
.text; \
- .align 8; \
+ .balign 8; \
.globl x; \
.type x, @function; \
x:
#ifdef PPC64_ELF_ABI_v2
#define ENTRY(name) \
- .align 2 ; \
+ .balign 4 ; \
.type name,@function; \
.weak name; \
name:
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
#define ENTRY(name) \
- .align 2 ; \
+ .balign 4 ; \
.weak name; \
.weak GLUE(.,name); \
.pushsection ".opd","aw"; \
#define ENTRY(x) \
.text ; \
- .align 32 ; \
+ .balign 32 ; \
.globl x ; \
.type x,@function ; \
x: