author    Johann <johannkoenig@google.com>  2016-02-17 17:37:24 -0800
committer Johann <johannkoenig@google.com>  2016-02-17 17:41:21 -0800
commit    2497356d0665cee2b3f818968c152f43ada74cc0 (patch)
tree      37f7fb081eb6486e973d7e2f75f1de4ca652cd50 /third_party
parent    fdc977afc6b431c7577e70d151f89ea726bcaf8f (diff)
Update x86inc.asm from x264
Revision d23d18655249944c1ca894b451e2c82c7a584c62 from https://git.videolan.org/x264.git

Change-Id: I841ec8c5ea71935aa364657299c6bba0c8742fc1
Diffstat (limited to 'third_party')
-rw-r--r--  third_party/x86inc/README.libvpx  |  18
-rw-r--r--  third_party/x86inc/x86inc.asm     | 497
2 files changed, 207 insertions, 308 deletions
diff --git a/third_party/x86inc/README.libvpx b/third_party/x86inc/README.libvpx
index e91e305a2..82d1a2c4d 100644
--- a/third_party/x86inc/README.libvpx
+++ b/third_party/x86inc/README.libvpx
@@ -1,5 +1,5 @@
-URL: http://git.videolan.org/?p=x264.git
-Version: a95584945dd9ce3acc66c6cd8f6796bc4404d40d
+URL: https://git.videolan.org/git/x264.git
+Version: d23d18655249944c1ca894b451e2c82c7a584c62
License: ISC
License File: LICENSE
@@ -8,17 +8,3 @@ x264/libav's framework for x86 assembly. Contains a variety of macros and
defines that help automatically allow assembly to work cross-platform.
Local Modifications:
-Get configuration from vpx_config.asm.
-Prefix functions with vpx by default.
-Manage name mangling (prefixing with '_') manually because 'PREFIX' does not
- exist in libvpx.
-Expand PIC default to macho64 and respect CONFIG_PIC from libvpx
-Catch all elf formats for 'hidden' status and SECTION notes.
-Avoid 'amdnop' when building with nasm.
-Set 'private_extern' visibility for macho targets.
-Copy PIC 'GLOBAL' macros from x86_abi_support.asm
-Use .text instead of .rodata on macho to avoid broken tables in PIC mode.
-Use .text with no alignment for aout
-Only use 'hidden' visibility with Chromium
-Move '%use smartalign' for nasm out of 'INIT_CPUFLAGS' and before
- 'ALIGNMODE'.
diff --git a/third_party/x86inc/x86inc.asm b/third_party/x86inc/x86inc.asm
index e7d3fa5eb..094df43b6 100644
--- a/third_party/x86inc/x86inc.asm
+++ b/third_party/x86inc/x86inc.asm
@@ -1,7 +1,7 @@
;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
-;* Copyright (C) 2005-2015 x264 project
+;* Copyright (C) 2005-2016 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;* Anton Mitrofanov <BugMaster@narod.ru>
@@ -34,10 +34,8 @@
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .
-%include "vpx_config.asm"
-
%ifndef private_prefix
- %define private_prefix vpx
+ %define private_prefix x264
%endif
%ifndef public_prefix
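This hunk drops the libvpx-specific vpx_config.asm include and reverts private_prefix to upstream's x264 default; a downstream build would presumably predefine the prefix instead (e.g. -Dprivate_prefix=vpx). A minimal sketch of the effect on cglobal, function name hypothetical:

    ; cglobal sad16x16   ; assembles as x264_sad16x16 by default, or as
    ;                    ; vpx_sad16x16 when private_prefix is predefined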
@@ -66,129 +64,40 @@
%endif
%endif
-%ifidn __OUTPUT_FORMAT__,elf32
- %define mangle(x) x
+%define FORMAT_ELF 0
+%ifidn __OUTPUT_FORMAT__,elf
+ %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf32
+ %define FORMAT_ELF 1
%elifidn __OUTPUT_FORMAT__,elf64
- %define mangle(x) x
-%elifidn __OUTPUT_FORMAT__,x64
- %define mangle(x) x
-%elifidn __OUTPUT_FORMAT__,win64
- %define mangle(x) x
-%else
- %define mangle(x) _ %+ x
+ %define FORMAT_ELF 1
%endif
-; In some instances macho32 tables get misaligned when using .rodata.
-; When looking at the disassembly it appears that the offset is either
-; correct or consistently off by 90. Placing them in the .text section
-; works around the issue. It appears to be specific to the way libvpx
-; handles the tables.
-%macro SECTION_RODATA 0-1 16
- %ifidn __OUTPUT_FORMAT__,macho32
- SECTION .text align=%1
- fakegot:
- %elifidn __OUTPUT_FORMAT__,aout
- SECTION .text
- %else
- SECTION .rodata align=%1
- %endif
-%endmacro
-
-%macro SECTION_TEXT 0-1 16
- %ifidn __OUTPUT_FORMAT__,aout
- SECTION .text
- %else
- SECTION .text align=%1
- %endif
-%endmacro
-
-; PIC macros are copied from vpx_ports/x86_abi_support.asm. The "define PIC"
-; from original code is added in for 64bit.
-%ifidn __OUTPUT_FORMAT__,elf32
-%define ABI_IS_32BIT 1
-%elifidn __OUTPUT_FORMAT__,macho32
-%define ABI_IS_32BIT 1
-%elifidn __OUTPUT_FORMAT__,win32
-%define ABI_IS_32BIT 1
-%elifidn __OUTPUT_FORMAT__,aout
-%define ABI_IS_32BIT 1
+%ifdef PREFIX
+ %define mangle(x) _ %+ x
%else
-%define ABI_IS_32BIT 0
+ %define mangle(x) x
%endif
-%if ABI_IS_32BIT
- %if CONFIG_PIC=1
- %ifidn __OUTPUT_FORMAT__,elf32
- %define GET_GOT_DEFINED 1
- %define WRT_PLT wrt ..plt
- %macro GET_GOT 1
- extern _GLOBAL_OFFSET_TABLE_
- push %1
- call %%get_got
- %%sub_offset:
- jmp %%exitGG
- %%get_got:
- mov %1, [esp]
- add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc
- ret
- %%exitGG:
- %undef GLOBAL
- %define GLOBAL(x) x + %1 wrt ..gotoff
- %undef RESTORE_GOT
- %define RESTORE_GOT pop %1
- %endmacro
- %elifidn __OUTPUT_FORMAT__,macho32
- %define GET_GOT_DEFINED 1
- %macro GET_GOT 1
- push %1
- call %%get_got
- %%get_got:
- pop %1
- %undef GLOBAL
- %define GLOBAL(x) x + %1 - %%get_got
- %undef RESTORE_GOT
- %define RESTORE_GOT pop %1
- %endmacro
- %else
- %define GET_GOT_DEFINED 0
- %endif
- %endif
-
- %if ARCH_X86_64 == 0
- %undef PIC
- %endif
-
-%else
- %macro GET_GOT 1
- %endmacro
- %define GLOBAL(x) rel x
- %define WRT_PLT wrt ..plt
-
- %if WIN64
- %define PIC
- %elifidn __OUTPUT_FORMAT__,macho64
- %define PIC
- %elif CONFIG_PIC
- %define PIC
- %endif
-%endif
+%macro SECTION_RODATA 0-1 16
+ SECTION .rodata align=%1
+%endmacro
-%ifnmacro GET_GOT
- %macro GET_GOT 1
- %endmacro
- %define GLOBAL(x) x
-%endif
-%ifndef RESTORE_GOT
- %define RESTORE_GOT
+%if WIN64
+ %define PIC
+%elif ARCH_X86_64 == 0
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+ %undef PIC
%endif
-%ifndef WRT_PLT
- %define WRT_PLT
-%endif
-
%ifdef PIC
default rel
%endif
-; Done with PIC macros
+
+%ifdef __NASM_VER__
+ %use smartalign
+%endif
; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
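This hunk collapses the per-format name-mangling table into a single FORMAT_ELF flag plus a PREFIX switch, and replaces libvpx's copied GET_GOT machinery with x264's simpler rule: PIC is always on for WIN64, explicitly dropped on x86_32, and otherwise left to the build system. A rough sketch of how the new defines resolve, with foo as a hypothetical symbol:

    ; Illustrative only:
    ;   elf/elf32/elf64:       FORMAT_ELF=1, mangle(foo) -> foo
    ;   win64:                 FORMAT_ELF=0, mangle(foo) -> foo
    ;   formats built with -DPREFIX (e.g. mach-o): mangle(foo) -> _foo
    ; When PIC is defined, "default rel" makes memory operands RIP-relative:
    ;   lea r0, [sym]          ; assembles as lea r0, [rel sym]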
@@ -237,6 +146,7 @@
%define r%1w %2w
%define r%1b %2b
%define r%1h %2h
+ %define %2q %2
%if %0 == 2
%define r%1m %2d
%define r%1mp %2
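The one functional change in this hunk is the added %define %2q %2, which appears intended to alias a native register name carrying a q suffix back to the register itself, so macros can append a size suffix to a register parameter without caring whether it is a numbered or a raw register. A minimal sketch, register choice hypothetical:

    ; %define rcxq rcx        ; generated by DECLARE_REG ..., rcx, ...
    ; mov rcxq, 1             ; identical to "mov rcx, 1"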
@@ -261,9 +171,9 @@
%define e%1h %3
%define r%1b %2
%define e%1b %2
-%if ARCH_X86_64 == 0
- %define r%1 e%1
-%endif
+ %if ARCH_X86_64 == 0
+ %define r%1 e%1
+ %endif
%endmacro
DECLARE_REG_SIZE ax, al, ah
@@ -373,7 +283,7 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
%macro ASSERT 1
%if (%1) == 0
- %error assert failed
+ %error assertion ``%1'' failed
%endif
%endmacro
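ASSERT now interpolates the failing expression into the %error text, so a broken build names the condition instead of printing a bare "assert failed". For example, condition hypothetical:

    ; ASSERT ARCH_X86_64      ; a 32-bit build now stops with:
    ;                         ;   assertion ``ARCH_X86_64'' failed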
@@ -464,8 +374,10 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
%if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
%if %1 > 0
%assign regs_used (regs_used + 1)
- %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
- %warning "Stack pointer will overwrite register argument"
+ %endif
+ %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
+ ; Ensure that we don't clobber any registers containing arguments
+ %assign regs_used 5 + UNIX64 * 3
%endif
%endif
%endif
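Where the old code merely warned that the realigned stack pointer was about to land in a register still holding a function argument, the new code raises regs_used (to 5 on WIN64, 8 on UNIX64, per the 5 + UNIX64 * 3 expression) so the pointer is allocated clear of the argument registers. A hedged sketch of a call site this protects, prototype hypothetical:

    ; cglobal sum_rows, 4, 4, 8, 32  ; 4 args + stack space needing extra
    ;                                ; alignment: regs_used is silently
    ;                                ; bumped rather than warning "Stack
    ;                                ; pointer will overwrite register
    ;                                ; argument"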
@@ -579,9 +491,9 @@ DECLARE_REG 14, R15, 120
%macro RET 0
WIN64_RESTORE_XMM_INTERNAL rsp
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
@@ -618,17 +530,17 @@ DECLARE_REG 14, R15, 72
%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
%macro RET 0
-%if stack_size_padded > 0
-%if required_stack_alignment > STACK_ALIGNMENT
- mov rsp, rstkm
-%else
- add rsp, stack_size_padded
-%endif
-%endif
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
POP_IF_USED 14, 13, 12, 11, 10, 9
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
@@ -674,29 +586,29 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
%macro RET 0
-%if stack_size_padded > 0
-%if required_stack_alignment > STACK_ALIGNMENT
- mov rsp, rstkm
-%else
- add rsp, stack_size_padded
-%endif
-%endif
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
POP_IF_USED 6, 5, 4, 3
-%if mmsize == 32
- vzeroupper
-%endif
+ %if mmsize == 32
+ vzeroupper
+ %endif
AUTO_REP_RET
%endmacro
%endif ;======================================================================
%if WIN64 == 0
-%macro WIN64_SPILL_XMM 1
-%endmacro
-%macro WIN64_RESTORE_XMM 1
-%endmacro
-%macro WIN64_PUSH_XMM 0
-%endmacro
+ %macro WIN64_SPILL_XMM 1
+ %endmacro
+ %macro WIN64_RESTORE_XMM 1
+ %endmacro
+ %macro WIN64_PUSH_XMM 0
+ %endmacro
%endif
; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
@@ -709,24 +621,26 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%else
rep ret
%endif
+ annotate_function_size
%endmacro
%define last_branch_adr $$
%macro AUTO_REP_RET 0
- %ifndef cpuflags
- times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ != last_branch_adr.
- %elif notcpuflag(ssse3)
- times ((last_branch_adr-$)>>31)+1 rep
+ %if notcpuflag(ssse3)
+ times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
%endif
ret
+ annotate_function_size
%endmacro
%macro BRANCH_INSTR 0-*
%rep %0
%macro %1 1-2 %1
%2 %1
- %%branch_instr:
- %xdefine last_branch_adr %%branch_instr
+ %if notcpuflag(ssse3)
+ %%branch_instr equ $
+ %xdefine last_branch_adr %%branch_instr
+ %endif
%endmacro
%rotate 1
%endrep
@@ -741,6 +655,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
%elif %2
jmp %1
%endif
+ annotate_function_size
%endmacro
;=============================================================================
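Beyond reindentation, these hunks thread annotate_function_size through every epilogue path (REP_RET, AUTO_REP_RET, TAIL_CALL) and make the last_branch_adr bookkeeping conditional on notcpuflag(ssse3), since per the comment above the rep-ret trick only matters on AMD parts up to K10. Roughly, on such targets:

    ; .skip:                  ; a branch target followed directly by a return
    ;     REP_RET             ; emits "rep ret" to avoid the AMD K8/K10
    ;                         ; branch-misprediction penalty; newer targets
    ;                         ; get a plain "ret"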
@@ -762,16 +677,10 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
cglobal_internal 0, %1 %+ SUFFIX, %2
%endmacro
%macro cglobal_internal 2-3+
+ annotate_function_size
%if %1
%xdefine %%FUNCTION_PREFIX private_prefix
- ; libvpx explicitly sets visibility in shared object builds. Avoid
- ; setting visibility to hidden as it may break builds that split
- ; sources on e.g., directory boundaries.
- %ifdef CHROMIUM
- %xdefine %%VISIBILITY hidden
- %else
- %xdefine %%VISIBILITY
- %endif
+ %xdefine %%VISIBILITY hidden
%else
%xdefine %%FUNCTION_PREFIX public_prefix
%xdefine %%VISIBILITY
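With the Chromium conditional gone, private-prefix functions on ELF are always declared hidden, keeping them out of a shared library's dynamic symbol table. Concretely, cglobal now emits something like the following, name hypothetical:

    ; global x264_sad16x16_sse2:function hidden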
@@ -782,22 +691,9 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
CAT_XDEFINE cglobaled_, %2, 1
%endif
%xdefine current_function %2
- %ifidn __OUTPUT_FORMAT__,elf32
- global %2:function %%VISIBILITY
- %elifidn __OUTPUT_FORMAT__,elf64
+ %xdefine current_function_section __SECT__
+ %if FORMAT_ELF
global %2:function %%VISIBILITY
- %elifidn __OUTPUT_FORMAT__,macho32
- %ifdef __NASM_VER__
- global %2
- %else
- global %2:private_extern
- %endif
- %elifidn __OUTPUT_FORMAT__,macho64
- %ifdef __NASM_VER__
- global %2
- %else
- global %2:private_extern
- %endif
%else
global %2
%endif
@@ -822,16 +718,16 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
; like cextern, but without the prefix
%macro cextern_naked 1
- %xdefine %1 mangle(%1)
+ %ifdef PREFIX
+ %xdefine %1 mangle(%1)
+ %endif
CAT_XDEFINE cglobaled_, %1, 1
extern %1
%endmacro
%macro const 1-2+
%xdefine %1 mangle(private_prefix %+ _ %+ %1)
- %ifidn __OUTPUT_FORMAT__,elf32
- global %1:data hidden
- %elifidn __OUTPUT_FORMAT__,elf64
+ %if FORMAT_ELF
global %1:data hidden
%else
global %1
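const gets the same FORMAT_ELF consolidation as cglobal: hidden data visibility on any ELF flavor, a plain global elsewhere. A usage sketch, constant hypothetical:

    ; const pw_16, times 8 dw 16   ; on ELF expands to
    ;                              ;   global x264_pw_16:data hidden
    ;                              ;   x264_pw_16: times 8 dw 16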
@@ -839,14 +735,29 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
%1: %2
%endmacro
-; This is needed for ELF, otherwise the GNU linker assumes the stack is
-; executable by default.
-%ifidn __OUTPUT_FORMAT__,elf32
-SECTION .note.GNU-stack noalloc noexec nowrite progbits
-%elifidn __OUTPUT_FORMAT__,elf64
-SECTION .note.GNU-stack noalloc noexec nowrite progbits
+; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
+%if FORMAT_ELF
+ [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
%endif
+; Tell debuggers how large the function was.
+; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
+; This is invoked by RET and similar macros, and also cglobal does it for the previous function,
+; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
+; then its size might be unspecified.
+%macro annotate_function_size 0
+ %ifdef __YASM_VER__
+ %ifdef current_function
+ %if FORMAT_ELF
+ current_function_section
+ %%ecf equ $
+ size current_function %%ecf - current_function
+ __SECT__
+ %endif
+ %endif
+ %endif
+%endmacro
+
; cpuflags
%assign cpuflags_mmx (1<<0)
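annotate_function_size leans on yasm's ELF-only size directive, which is why it is gated on both __YASM_VER__ and FORMAT_ELF; nasm builds simply get no size annotation. One expansion, roughly annotated with a hypothetical function name:

    ; current_function_section        ; re-enter the function's section
    ; %%ecf equ $                     ; mark the current end of the function
    ; size x264_foo %%ecf - x264_foo  ; yasm spelling of ELF ".size"
    ; __SECT__                        ; restore the interrupted section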
@@ -875,12 +786,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt
%assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1
-%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
-%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
-
-%ifdef __NASM_VER__
- %use smartalign
-%endif
+; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
+%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
+%define notcpuflag(x) (cpuflag(x) ^ 1)
; Takes an arbitrary number of cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) is built for the specified cpu.
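The old comparison form of cpuflag() evaluates to the assembler's native true value rather than 1, which is awkward in arithmetic; the replacement is branch-free arithmetic that yields exactly 0 or 1, so the flags compose in expressions (note the new notcpuflag(x) is simply cpuflag(x) ^ 1). Worked through, given that every flag constant above fits in 31 bits:

    ; let v = cpuflags & cpuflags_x
    ;   all bits present: v ^ mask = 0  -> -1 after the subtract
    ;                                   -> (>>31) & 1 = 1
    ;   any bit missing:  v ^ mask > 0  -> value in [0, 2^31) after the
    ;                                      subtract -> (>>31) & 1 = 0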
@@ -917,12 +825,18 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif
%endif
- %ifdef __NASM_VER__
- ALIGNMODE k7
- %elif ARCH_X86_64 || cpuflag(sse2)
- CPU amdnop
+ %if ARCH_X86_64 || cpuflag(sse2)
+ %ifdef __NASM_VER__
+ ALIGNMODE k8
+ %else
+ CPU amdnop
+ %endif
%else
- CPU basicnop
+ %ifdef __NASM_VER__
+ ALIGNMODE nop
+ %else
+ CPU basicnop
+ %endif
%endif
%endmacro
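With %use smartalign hoisted to the top of the file (earlier hunk), INIT_CPUFLAGS now only selects the padding strategy, and does so symmetrically for nasm and yasm: multi-byte NOPs (ALIGNMODE k8 / CPU amdnop) once SSE2 or x86-64 guarantees they are cheap, single-byte NOPs otherwise. Illustration:

    ; align 32               ; ALIGNMODE k8:  a few long-form NOP instructions
    ;                        ; ALIGNMODE nop: a run of single-byte 0x90s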
@@ -951,14 +865,14 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define movnta movntq
%assign %%i 0
%rep 8
- CAT_XDEFINE m, %%i, mm %+ %%i
- CAT_XDEFINE nnmm, %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nnmm, %%i, %%i
+ %assign %%i %%i+1
%endrep
%rep 8
- CAT_UNDEF m, %%i
- CAT_UNDEF nnmm, %%i
- %assign %%i %%i+1
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nnmm, %%i
+ %assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
@@ -969,7 +883,7 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define mmsize 16
%define num_mmregs 8
%if ARCH_X86_64
- %define num_mmregs 16
+ %define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
@@ -977,9 +891,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
- CAT_XDEFINE m, %%i, xmm %+ %%i
- CAT_XDEFINE nnxmm, %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nnxmm, %%i, %%i
+ %assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
@@ -990,7 +904,7 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define mmsize 32
%define num_mmregs 8
%if ARCH_X86_64
- %define num_mmregs 16
+ %define num_mmregs 16
%endif
%define mova movdqa
%define movu movdqu
@@ -998,9 +912,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
%define movnta movntdq
%assign %%i 0
%rep num_mmregs
- CAT_XDEFINE m, %%i, ymm %+ %%i
- CAT_XDEFINE nnymm, %%i, %%i
- %assign %%i %%i+1
+ CAT_XDEFINE m, %%i, ymm %+ %%i
+ CAT_XDEFINE nnymm, %%i, %%i
+ %assign %%i %%i+1
%endrep
INIT_CPUFLAGS %1
%endmacro
@@ -1024,7 +938,7 @@ INIT_XMM
%assign i 0
%rep 16
DECLARE_MMCAST i
-%assign i i+1
+ %assign i i+1
%endrep
; I often want to use macros that permute their arguments. e.g. there's no
@@ -1042,23 +956,23 @@ INIT_XMM
; doesn't cost any cycles.
%macro PERMUTE 2-* ; takes a list of pairs to swap
-%rep %0/2
- %xdefine %%tmp%2 m%2
- %rotate 2
-%endrep
-%rep %0/2
- %xdefine m%1 %%tmp%2
- CAT_XDEFINE nn, m%1, %1
- %rotate 2
-%endrep
+ %rep %0/2
+ %xdefine %%tmp%2 m%2
+ %rotate 2
+ %endrep
+ %rep %0/2
+ %xdefine m%1 %%tmp%2
+ CAT_XDEFINE nn, m%1, %1
+ %rotate 2
+ %endrep
%endmacro
%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
-%ifnum %1 ; SWAP 0, 1, ...
- SWAP_INTERNAL_NUM %1, %2
-%else ; SWAP m0, m1, ...
- SWAP_INTERNAL_NAME %1, %2
-%endif
+ %ifnum %1 ; SWAP 0, 1, ...
+ SWAP_INTERNAL_NUM %1, %2
+ %else ; SWAP m0, m1, ...
+ SWAP_INTERNAL_NAME %1, %2
+ %endif
%endmacro
%macro SWAP_INTERNAL_NUM 2-*
@@ -1068,7 +982,7 @@ INIT_XMM
%xdefine m%2 %%tmp
CAT_XDEFINE nn, m%1, %1
CAT_XDEFINE nn, m%2, %2
- %rotate 1
+ %rotate 1
%endrep
%endmacro
@@ -1076,7 +990,7 @@ INIT_XMM
%xdefine %%args nn %+ %1
%rep %0-1
%xdefine %%args %%args, nn %+ %2
- %rotate 1
+ %rotate 1
%endrep
SWAP_INTERNAL_NUM %%args
%endmacro
@@ -1093,7 +1007,7 @@ INIT_XMM
%assign %%i 0
%rep num_mmregs
CAT_XDEFINE %%f, %%i, m %+ %%i
- %assign %%i %%i+1
+ %assign %%i %%i+1
%endrep
%endmacro
@@ -1103,20 +1017,20 @@ INIT_XMM
%rep num_mmregs
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE nn, m %+ %%i, %%i
- %assign %%i %%i+1
+ %assign %%i %%i+1
%endrep
%endif
%endmacro
; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
- call_internal %1, %1 %+ SUFFIX
+ call_internal %1 %+ SUFFIX, %1
%endmacro
%macro call_internal 2
- %xdefine %%i %1
- %ifndef cglobaled_%1
- %ifdef cglobaled_%2
- %xdefine %%i %2
+ %xdefine %%i %2
+ %ifndef cglobaled_%2
+ %ifdef cglobaled_%1
+ %xdefine %%i %1
%endif
%endif
call %%i
@@ -1159,7 +1073,7 @@ INIT_XMM
%endif
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32
-%assign i i+1
+ %assign i i+1
%endrep
%undef i
@@ -1536,7 +1450,7 @@ AVX_INSTR pfmul, 3dnow, 1, 0, 1
%else
CAT_XDEFINE q, j, i
%endif
-%assign i i+1
+ %assign i i+1
%endrep
%undef i
%undef j
@@ -1559,55 +1473,54 @@ FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation
FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation
FMA_INSTR pmadcswd, pmaddwd, paddd
-; convert FMA4 to FMA3 if possible
-%macro FMA4_INSTR 4
- %macro %1 4-8 %1, %2, %3, %4
- %if cpuflag(fma4)
- v%5 %1, %2, %3, %4
- %elifidn %1, %2
- v%6 %1, %4, %3 ; %1 = %1 * %3 + %4
- %elifidn %1, %3
- v%7 %1, %2, %4 ; %1 = %2 * %1 + %4
- %elifidn %1, %4
- v%8 %1, %2, %3 ; %1 = %2 * %3 + %1
- %else
- %error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported
- %endif
- %endmacro
-%endmacro
-
-FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd
-FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps
-FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd
-FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss
-
-FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd
-FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps
-FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd
-FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps
-
-FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd
-FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps
-FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd
-FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss
-
-FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd
-FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps
-FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd
-FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss
-
-FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd
-FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps
-FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd
-FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss
-
-; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug
-%if ARCH_X86_64 == 0
-%macro vpbroadcastq 2
-%if sizeof%1 == 16
- movddup %1, %2
-%else
- vbroadcastsd %1, %2
-%endif
-%endmacro
+; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
+; FMA3 is only possible if dst is the same as one of the src registers.
+; Either src2 or src3 can be a memory operand.
+%macro FMA4_INSTR 2-*
+ %push fma4_instr
+ %xdefine %$prefix %1
+ %rep %0 - 1
+ %macro %$prefix%2 4-6 %$prefix, %2
+ %if notcpuflag(fma3) && notcpuflag(fma4)
+ %error use of ``%5%6'' fma instruction in cpuname function: current_function
+ %elif cpuflag(fma4)
+ v%5%6 %1, %2, %3, %4
+ %elifidn %1, %2
+ ; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
+ %ifid %3
+ v%{5}213%6 %2, %3, %4
+ %else
+ v%{5}132%6 %2, %4, %3
+ %endif
+ %elifidn %1, %3
+ v%{5}213%6 %3, %2, %4
+ %elifidn %1, %4
+ v%{5}231%6 %4, %2, %3
+ %else
+ %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported
+ %endif
+ %endmacro
+ %rotate 1
+ %endrep
+ %pop
+%endmacro
+
+FMA4_INSTR fmadd, pd, ps, sd, ss
+FMA4_INSTR fmaddsub, pd, ps
+FMA4_INSTR fmsub, pd, ps, sd, ss
+FMA4_INSTR fmsubadd, pd, ps
+FMA4_INSTR fnmadd, pd, ps, sd, ss
+FMA4_INSTR fnmsub, pd, ps, sd, ss
+
+; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
+%ifdef __YASM_VER__
+ %if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
+ %macro vpbroadcastq 2
+ %if sizeof%1 == 16
+ movddup %1, %2
+ %else
+ vbroadcastsd %1, %2
+ %endif
+ %endmacro
+ %endif
%endif
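The rewritten FMA4_INSTR defines one macro per precision suffix and unifies the 4-operand syntax (%1 = %2 * %3 + %4) across FMA3 and FMA4, choosing the 132/213/231 FMA3 form by which source operand the destination aliases, and erroring out when neither flag is enabled. A usage sketch, registers hypothetical:

    ; INIT_XMM fma3
    ; fmaddps m0, m0, m1, m2  ; dst == src1 -> vfmadd213ps xmm0, xmm1, xmm2
    ; fmaddps m2, m0, m1, m2  ; dst == src3 -> vfmadd231ps xmm2, xmm0, xmm1
    ; INIT_XMM fma4
    ; fmaddps m0, m0, m1, m2  ; -> vfmaddps xmm0, xmm0, xmm1, xmm2

The vpbroadcastq workaround above is likewise narrowed from all x86_32 builds to the yasm versions that actually have the bug (pre-1.3.0).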