author    | Ronald S. Bultje <rsbultje@gmail.com> | 2012-01-23 17:45:58 +0800
committer | Ronald S. Bultje <rsbultje@gmail.com> | 2012-01-27 10:19:57 +0800
commit    | 3b15a6d742edd368696a1feb6fa99892768e8a73 (patch)
tree      | 0ba73cc23175f3fb0e99cb842b8c2119c9cdf352 /libavcodec/x86/dsputil_yasm.asm
parent    | 08628b6afbc9b708b46f871f25a7a6be76ba4337 (diff)
download  | ffmpeg-3b15a6d742edd368696a1feb6fa99892768e8a73.tar.gz
config.asm: change %ifdef directives to %if directives.
This allows combining multiple conditionals in a single statement.
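For context, a minimal sketch of the preprocessor semantics involved, assuming the 0/1-valued symbols that the generated config.asm now provides; the combined ARCH_X86_64 && WIN64 check below illustrates the motivation and is not a line from this diff:

; %ifdef only tests whether a name is defined, so each condition
; needs its own directive and negation needs %ifndef:
%ifdef ARCH_X86_64
%ifdef WIN64
    ; code for 64-bit Windows builds only
%endif
%endif

; %if evaluates the symbol's value as an expression, so conditions
; combine in a single statement:
%if ARCH_X86_64 && WIN64
    ; code for 64-bit Windows builds only
%endif

; and negated checks become explicit comparisons, as in this diff:
%if ARCH_X86_64 == 0
    ; code for 32-bit builds only
%endif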
Diffstat (limited to 'libavcodec/x86/dsputil_yasm.asm')
-rw-r--r-- | libavcodec/x86/dsputil_yasm.asm | 54
1 file changed, 27 insertions, 27 deletions
diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm
index 4d2fb6a373..4607ff15d7 100644
--- a/libavcodec/x86/dsputil_yasm.asm
+++ b/libavcodec/x86/dsputil_yasm.asm
@@ -138,7 +138,7 @@ align 16
 %endif
 %define t0 [v1q + orderq]
 %define t1 [v1q + orderq + mmsize]
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     mova    m8, t0
     mova    m9, t1
 %define t0  m8
@@ -474,7 +474,7 @@ cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset
     movss   xmm1, xmm0
     shufps  xmm0, xmm0, 1
     addss   xmm0, xmm1
-%ifndef ARCH_X86_64
+%if ARCH_X86_64 == 0
     movd    r0m,  xmm0
     fld     dword r0m
 %endif
@@ -498,7 +498,7 @@ cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset
 ; function implementations. Fast are fixed-width, slow is variable-width
 
 %macro EMU_EDGE_FUNC 0
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
 %define w_reg r10
 cglobal emu_edge_core, 6, 7, 1
     mov     r11, r5                 ; save block_h
@@ -513,14 +513,14 @@ cglobal emu_edge_core, 2, 7, 0
     mov     w_reg, r7m
     sub     w_reg, r6m              ; w = start_x - end_x
     sub     r5, r4
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     sub     r4, r3
 %else
     sub     r4, dword r3m
 %endif
     cmp     w_reg, 22
     jg      .slow_v_extend_loop
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
     mov     r2, r2m                 ; linesize
 %endif
     sal     w_reg, 7                ; w * 128
@@ -536,7 +536,7 @@ cglobal emu_edge_core, 2, 7, 0
     ; horizontal extend (left/right)
     mov     w_reg, r6m              ; start_x
     sub     r0, w_reg
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     mov     r3, r0                  ; backup of buf+block_h*linesize
     mov     r5, r11
 %else
@@ -564,7 +564,7 @@ cglobal emu_edge_core, 2, 7, 0
 ; now r3(64)/r0(32)=buf,r2=linesize,r11/r5=block_h,r6/r3=val, r10/r6=end_x, r1=block_w
 
 .right_extend:
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
     mov     r0, r0m
     mov     r5, r5m
 %endif
@@ -589,13 +589,13 @@ cglobal emu_edge_core, 2, 7, 0
 .h_extend_end:
     RET
 
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
 %define vall  al
 %define valh  ah
 %define valw  ax
 %define valw2 r10w
 %define valw3 r3w
-%ifdef WIN64
+%if WIN64
 %define valw4 r4w
 %else ; unix64
 %define valw4 r3w
@@ -643,7 +643,7 @@ cglobal emu_edge_core, 2, 7, 0
 %endrep ; %2/16
 %endif
 
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
 %if (%2-%%src_off) == 8
     mov     rax, [r1+%%src_off]
 %assign %%src_off %%src_off+8
@@ -692,7 +692,7 @@ cglobal emu_edge_core, 2, 7, 0
 %endrep ; %2/16
 %endif
 
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
 %if (%2-%%dst_off) == 8
     mov     [r0+%%dst_off], rax
 %assign %%dst_off %%dst_off+8
@@ -740,7 +740,7 @@ cglobal emu_edge_core, 2, 7, 0
 ALIGN 128
 .emuedge_v_extend_ %+ %%n:
     ; extend pixels above body
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     test    r3 , r3                 ; if (!start_y)
     jz      .emuedge_copy_body_ %+ %%n %+ _loop ; goto body
 %else ; ARCH_X86_32
@@ -751,7 +751,7 @@ ALIGN 128
 .emuedge_extend_top_ %+ %%n %+ _loop: ; do {
     WRITE_NUM_BYTES top, %%n        ; write bytes
     add     r0 , r2                 ; dst += linesize
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     dec     r3d
 %else ; ARCH_X86_32
     dec     dword r3m
@@ -779,7 +779,7 @@ ALIGN 128
     jnz     .emuedge_extend_bottom_ %+ %%n %+ _loop ; } while (--block_h)
 
 .emuedge_v_extend_end_ %+ %%n:
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     ret
 %else ; ARCH_X86_32
     rep ret
@@ -841,7 +841,7 @@ ALIGN 64
     WRITE_V_PIXEL %%n, r0           ; write pixels
     dec     r5
     jnz     .emuedge_extend_left_ %+ %%n ; } while (--block_h)
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     ret
 %else ; ARCH_X86_32
     rep ret
@@ -856,7 +856,7 @@ ALIGN 64
 %rep 11
 ALIGN 64
 .emuedge_extend_right_ %+ %%n: ; do {
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     sub     r3, r2                  ; dst -= linesize
     READ_V_PIXEL  %%n, [r3+w_reg-1] ; read pixels
     WRITE_V_PIXEL %%n, r3+r4-%%n    ; write pixels
@@ -868,7 +868,7 @@ ALIGN 64
     dec     r5
 %endif ; ARCH_X86_64/32
     jnz     .emuedge_extend_right_ %+ %%n ; } while (--block_h)
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     ret
 %else ; ARCH_X86_32
     rep ret
@@ -876,7 +876,7 @@ ALIGN 64
 %assign %%n %%n+2
 %endrep
 
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
 %define stack_offset 0x10
 %endif
 %endmacro ; RIGHT_EXTEND
@@ -916,7 +916,7 @@ ALIGN 64
     V_COPY_NPX %1, mm0, movq, 8, 0xFFFFFFF8
 %else ; sse
     V_COPY_NPX %1, xmm0, movups, 16, 0xFFFFFFF0
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
 %define linesize r2
     V_COPY_NPX %1, rax , mov, 8
 %else ; ARCH_X86_32
@@ -940,7 +940,7 @@ ALIGN 64
 .slow_v_extend_loop:
 ; r0=buf,r1=src,r2(64)/r2m(32)=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
 ; r11(64)/r3(later-64)/r2(32)=cnt_reg,r6(64)/r3(32)=val_reg,r10(64)/r6(32)=w=end_x-start_x
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     push    r11                     ; save old value of block_h
     test    r3, r3
 %define cnt_reg r11
@@ -956,18 +956,18 @@ ALIGN 64
 .do_body_copy:
     V_COPY_ROW body, r4
 
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     pop     r11                     ; restore old value of block_h
 %define cnt_reg r3
 %endif
     test    r5, r5
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     jz      .v_extend_end
 %else
     jz      .skip_bottom_extend
 %endif
     V_COPY_ROW bottom, r5
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
 .skip_bottom_extend:
     mov     r2, r2m
 %endif
@@ -996,7 +996,7 @@ ALIGN 64
 .left_extend_loop_end:
     dec     r5
     jnz     .slow_left_extend_loop
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
     mov     r2, r2m
 %endif
     jmp     .right_extend
@@ -1006,7 +1006,7 @@ ALIGN 64
 .slow_right_extend_loop:
 ; r3(64)/r0(32)=buf+block_h*linesize,r2=linesize,r4=block_w,r11(64)/r5(32)=block_h,
 ; r10(64)/r6(32)=end_x,r6/r3=val,r1=cntr
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
 %define buf_reg r3
 %define bh_reg r11
 %else
@@ -1047,7 +1047,7 @@ SLOW_RIGHT_EXTEND
 %endmacro
 
 emu_edge sse
-%ifdef ARCH_X86_32
+%if ARCH_X86_32
 emu_edge mmx
 %endif
 
@@ -1138,7 +1138,7 @@ VECTOR_CLIP_INT32 6, 1, 0, 0
 
 %macro BUTTERFLIES_FLOAT_INTERLEAVE 0
 cglobal butterflies_float_interleave, 4,4,3, dst, src0, src1, len
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
     movsxd  lenq, lend
 %endif
     test    lenq, lenq
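The %if form relies on config.asm defining every tested symbol with an explicit 0/1 value, rather than defining it only when the condition holds. A hypothetical excerpt of a generated config.asm for a 32-bit Unix build (symbol names taken from this diff, values illustrative):

; each symbol is always defined, carrying 0 or 1, so %if can
; evaluate it as an expression; mere definedness no longer matters
%define ARCH_X86_32 1
%define ARCH_X86_64 0
%define WIN64 0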