aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael Niedermayer <michael@niedermayer.cc>2017-09-30 00:20:09 +0200
committerMichael Niedermayer <michael@niedermayer.cc>2017-10-05 01:33:26 +0200
commit0eb0b21c7f4f2b6a3a74d2d252f95b81a4d472c3 (patch)
treeb5118c5c6c739ba394a63aba64dec9a830cb1b53
parent2db9b3199641612c4cf91e1b356e40100019aa0b (diff)
downloadffmpeg-0eb0b21c7f4f2b6a3a74d2d252f95b81a4d472c3.tar.gz
avcodec/x86/lossless_videoencdsp: Fix handling of small widths
Fixes out of array access
Fixes: crash-huf.avi
Regression since: 6b41b4414934cc930468ccd5db598dd6ef643987
This could also be fixed by adding checks in the C code that calls the dsp
Found-by: Zhibin Hu and 连一汉 <lianyihan@360.cn>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit df62b70de8aaa285168e72fe8f6e740843ca91fa)
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
-rw-r--r--  libavcodec/x86/huffyuvencdsp.asm | 13
1 file changed, 7 insertions, 6 deletions
diff --git a/libavcodec/x86/huffyuvencdsp.asm b/libavcodec/x86/huffyuvencdsp.asm
index a55a1de65d..7a1ce2e839 100644
--- a/libavcodec/x86/huffyuvencdsp.asm
+++ b/libavcodec/x86/huffyuvencdsp.asm
@@ -42,10 +42,11 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
%define i t0q
%endmacro
-; label to jump to if w < regsize
-%macro DIFF_BYTES_LOOP_PREP 1
+; labels to jump to if w < regsize and w < 0
+%macro DIFF_BYTES_LOOP_PREP 2
mov i, wq
and i, -2 * regsize
+ js %2
jz %1
add dstq, i
add src1q, i
@@ -87,7 +88,7 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
%if mmsize > 16
; fall back to narrower xmm
%define regsize mmsize / 2
- DIFF_BYTES_LOOP_PREP .setup_loop_gpr_aa
+ DIFF_BYTES_LOOP_PREP .setup_loop_gpr_aa, .end_aa
.loop2_%1%2:
DIFF_BYTES_LOOP_CORE %1, %2, xm0, xm1
add i, 2 * regsize
@@ -114,7 +115,7 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
INIT_MMX mmx
DIFF_BYTES_PROLOGUE
%define regsize mmsize
- DIFF_BYTES_LOOP_PREP .skip_main_aa
+ DIFF_BYTES_LOOP_PREP .skip_main_aa, .end_aa
DIFF_BYTES_BODY a, a
%undef i
%endif
@@ -122,7 +123,7 @@ DIFF_BYTES_PROLOGUE
INIT_XMM sse2
DIFF_BYTES_PROLOGUE
%define regsize mmsize
- DIFF_BYTES_LOOP_PREP .skip_main_aa
+ DIFF_BYTES_LOOP_PREP .skip_main_aa, .end_aa
test dstq, regsize - 1
jnz .loop_uu
test src1q, regsize - 1
@@ -138,7 +139,7 @@ DIFF_BYTES_PROLOGUE
%define regsize mmsize
; Directly using unaligned SSE2 version is marginally faster than
; branching based on arguments.
- DIFF_BYTES_LOOP_PREP .skip_main_uu
+ DIFF_BYTES_LOOP_PREP .skip_main_uu, .end_uu
test dstq, regsize - 1
jnz .loop_uu
test src1q, regsize - 1