Diffstat (limited to 'chromium/third_party/ffmpeg/libswresample/x86/resample_mmx.h')
-rw-r--r--  chromium/third_party/ffmpeg/libswresample/x86/resample_mmx.h | 183
1 file changed, 179 insertions(+), 4 deletions(-)
diff --git a/chromium/third_party/ffmpeg/libswresample/x86/resample_mmx.h b/chromium/third_party/ffmpeg/libswresample/x86/resample_mmx.h
index d96fd5a9d2d..2bd48a92955 100644
--- a/chromium/third_party/ffmpeg/libswresample/x86/resample_mmx.h
+++ b/chromium/third_party/ffmpeg/libswresample/x86/resample_mmx.h
@@ -23,7 +23,9 @@
 #include "libswresample/swresample_internal.h"
 
 int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
-int swri_resample_int16_ssse3(struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
+int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
+int swri_resample_float_sse  (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
+int swri_resample_double_sse2(struct ResampleContext *c, double *dst, const double *src, int *consumed, int src_size, int dst_size, int update_ctx);
 
 DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
 
@@ -46,9 +48,38 @@ __asm__ volatile(\
     : "r" (((uint8_t*)(src+sample_index))-len),\
       "r" (((uint8_t*)filter)-len),\
       "r" (dst+dst_index)\
+      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
 );
 
-#define COMMON_CORE_INT16_SSSE3 \
+#define LINEAR_CORE_INT16_MMX2 \
+    x86_reg len= -2*c->filter_length;\
+__asm__ volatile(\
+    "pxor             %%mm0, %%mm0  \n\t"\
+    "pxor             %%mm2, %%mm2  \n\t"\
+    "1:                             \n\t"\
+    "movq          (%3, %0), %%mm1  \n\t"\
+    "movq             %%mm1, %%mm3  \n\t"\
+    "pmaddwd       (%4, %0), %%mm1  \n\t"\
+    "pmaddwd       (%5, %0), %%mm3  \n\t"\
+    "paddd            %%mm1, %%mm0  \n\t"\
+    "paddd            %%mm3, %%mm2  \n\t"\
+    "add                 $8, %0     \n\t"\
+    " js 1b                         \n\t"\
+    "pshufw    $0x0E, %%mm0, %%mm1  \n\t"\
+    "pshufw    $0x0E, %%mm2, %%mm3  \n\t"\
+    "paddd            %%mm1, %%mm0  \n\t"\
+    "paddd            %%mm3, %%mm2  \n\t"\
+    "movd             %%mm0, %1     \n\t"\
+    "movd             %%mm2, %2     \n\t"\
+    : "+r" (len),\
+      "=r" (val),\
+      "=r" (v2)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
+);
+
+#define COMMON_CORE_INT16_SSE2 \
     x86_reg len= -2*c->filter_length;\
 __asm__ volatile(\
     "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
@@ -58,8 +89,10 @@ __asm__ volatile(\
     "paddd           %%xmm1, %%xmm0 \n\t"\
     "add                $16, %0     \n\t"\
     " js 1b                         \n\t"\
-    "phaddd          %%xmm0, %%xmm0 \n\t"\
-    "phaddd          %%xmm0, %%xmm0 \n\t"\
+    "pshufd   $0x0E, %%xmm0, %%xmm1 \n\t"\
+    "paddd           %%xmm1, %%xmm0 \n\t"\
+    "pshufd   $0x01, %%xmm0, %%xmm1 \n\t"\
+    "paddd           %%xmm1, %%xmm0 \n\t"\
     "psrad              $15, %%xmm0 \n\t"\
     "packssdw        %%xmm0, %%xmm0 \n\t"\
     "movd            %%xmm0, (%3)   \n\t"\
@@ -67,4 +100,146 @@ __asm__ volatile(\
     : "r" (((uint8_t*)(src+sample_index))-len),\
       "r" (((uint8_t*)filter)-len),\
       "r" (dst+dst_index)\
+      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
+      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
+);
+
+#define LINEAR_CORE_INT16_SSE2 \
+    x86_reg len= -2*c->filter_length;\
+__asm__ volatile(\
+    "pxor            %%xmm0, %%xmm0 \n\t"\
+    "pxor            %%xmm2, %%xmm2 \n\t"\
+    "1:                             \n\t"\
+    "movdqu        (%3, %0), %%xmm1 \n\t"\
+    "movdqa          %%xmm1, %%xmm3 \n\t"\
+    "pmaddwd       (%4, %0), %%xmm1 \n\t"\
+    "pmaddwd       (%5, %0), %%xmm3 \n\t"\
+    "paddd           %%xmm1, %%xmm0 \n\t"\
+    "paddd           %%xmm3, %%xmm2 \n\t"\
+    "add                $16, %0     \n\t"\
+    " js 1b                         \n\t"\
+    "pshufd   $0x0E, %%xmm0, %%xmm1 \n\t"\
+    "pshufd   $0x0E, %%xmm2, %%xmm3 \n\t"\
+    "paddd           %%xmm1, %%xmm0 \n\t"\
+    "paddd           %%xmm3, %%xmm2 \n\t"\
+    "pshufd   $0x01, %%xmm0, %%xmm1 \n\t"\
+    "pshufd   $0x01, %%xmm2, %%xmm3 \n\t"\
+    "paddd           %%xmm1, %%xmm0 \n\t"\
+    "paddd           %%xmm3, %%xmm2 \n\t"\
+    "movd            %%xmm0, %1     \n\t"\
+    "movd            %%xmm2, %2     \n\t"\
+    : "+r" (len),\
+      "=r" (val),\
+      "=r" (v2)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
+      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
+);
+
+#define COMMON_CORE_FLT_SSE \
+    x86_reg len= -4*c->filter_length;\
+__asm__ volatile(\
+    "xorps           %%xmm0, %%xmm0 \n\t"\
+    "1:                             \n\t"\
+    "movups        (%1, %0), %%xmm1 \n\t"\
+    "mulps         (%2, %0), %%xmm1 \n\t"\
+    "addps           %%xmm1, %%xmm0 \n\t"\
+    "add                $16, %0     \n\t"\
+    " js 1b                         \n\t"\
+    "movhlps         %%xmm0, %%xmm1 \n\t"\
+    "addps           %%xmm1, %%xmm0 \n\t"\
+    "movss           %%xmm0, %%xmm1 \n\t"\
+    "shufps      $1, %%xmm0, %%xmm0 \n\t"\
+    "addps           %%xmm1, %%xmm0 \n\t"\
+    "movss           %%xmm0, (%3)   \n\t"\
+    : "+r" (len)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (dst+dst_index)\
+      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
+);
+
+#define LINEAR_CORE_FLT_SSE \
+    x86_reg len= -4*c->filter_length;\
+__asm__ volatile(\
+    "xorps           %%xmm0, %%xmm0 \n\t"\
+    "xorps           %%xmm2, %%xmm2 \n\t"\
+    "1:                             \n\t"\
+    "movups        (%3, %0), %%xmm1 \n\t"\
+    "movaps          %%xmm1, %%xmm3 \n\t"\
+    "mulps         (%4, %0), %%xmm1 \n\t"\
+    "mulps         (%5, %0), %%xmm3 \n\t"\
+    "addps           %%xmm1, %%xmm0 \n\t"\
+    "addps           %%xmm3, %%xmm2 \n\t"\
+    "add                $16, %0     \n\t"\
+    " js 1b                         \n\t"\
+    "movhlps         %%xmm0, %%xmm1 \n\t"\
+    "movhlps         %%xmm2, %%xmm3 \n\t"\
+    "addps           %%xmm1, %%xmm0 \n\t"\
+    "addps           %%xmm3, %%xmm2 \n\t"\
+    "movss           %%xmm0, %%xmm1 \n\t"\
+    "movss           %%xmm2, %%xmm3 \n\t"\
+    "shufps      $1, %%xmm0, %%xmm0 \n\t"\
+    "shufps      $1, %%xmm2, %%xmm2 \n\t"\
+    "addps           %%xmm1, %%xmm0 \n\t"\
+    "addps           %%xmm3, %%xmm2 \n\t"\
+    "movss           %%xmm0, %1     \n\t"\
+    "movss           %%xmm2, %2     \n\t"\
+    : "+r" (len),\
+      "=m" (val),\
+      "=m" (v2)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
+      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
+);
+
+#define COMMON_CORE_DBL_SSE2 \
+    x86_reg len= -8*c->filter_length;\
+__asm__ volatile(\
+    "xorpd           %%xmm0, %%xmm0 \n\t"\
+    "1:                             \n\t"\
+    "movupd        (%1, %0), %%xmm1 \n\t"\
+    "mulpd         (%2, %0), %%xmm1 \n\t"\
+    "addpd           %%xmm1, %%xmm0 \n\t"\
+    "add                $16, %0     \n\t"\
+    " js 1b                         \n\t"\
+    "movhlps         %%xmm0, %%xmm1 \n\t"\
+    "addpd           %%xmm1, %%xmm0 \n\t"\
+    "movsd           %%xmm0, (%3)   \n\t"\
+    : "+r" (len)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (dst+dst_index)\
+      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
+);
+
+#define LINEAR_CORE_DBL_SSE2 \
+    x86_reg len= -8*c->filter_length;\
+__asm__ volatile(\
+    "xorpd           %%xmm0, %%xmm0 \n\t"\
+    "xorpd           %%xmm2, %%xmm2 \n\t"\
+    "1:                             \n\t"\
+    "movupd        (%3, %0), %%xmm1 \n\t"\
+    "movapd          %%xmm1, %%xmm3 \n\t"\
+    "mulpd         (%4, %0), %%xmm1 \n\t"\
+    "mulpd         (%5, %0), %%xmm3 \n\t"\
+    "addpd           %%xmm1, %%xmm0 \n\t"\
+    "addpd           %%xmm3, %%xmm2 \n\t"\
+    "add                $16, %0     \n\t"\
+    " js 1b                         \n\t"\
+    "movhlps         %%xmm0, %%xmm1 \n\t"\
+    "movhlps         %%xmm2, %%xmm3 \n\t"\
+    "addpd           %%xmm1, %%xmm0 \n\t"\
+    "addpd           %%xmm3, %%xmm2 \n\t"\
+    "movsd           %%xmm0, %1     \n\t"\
+    "movsd           %%xmm2, %2     \n\t"\
+    : "+r" (len),\
+      "=m" (val),\
+      "=m" (v2)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
+      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
 );
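
All of these cores share one addressing trick: the source and filter pointers are pre-biased by -len, and len counts a byte offset up from a negative value to zero ("add $8/$16, %0" followed by "js 1b"), so the induction variable doubles as the loop-exit test. In scalar terms, each COMMON_CORE_* macro produces one output sample as the dot product of the source window at sample_index with one polyphase filter phase. A minimal C model of the int16 case follows; the function name and standalone signature are illustrative, not part of the patch:

    #include <stdint.h>

    /* Scalar model of COMMON_CORE_INT16_*: dot product plus the
     * ff_resample_int16_rounder bias (1 << 14), then the same shift and
     * saturation that psrad $15 / packssdw perform in the asm. */
    static int16_t common_core_int16_c(const int16_t *src, const int16_t *filter,
                                       int filter_length, int sample_index)
    {
        int acc = 1 << 14;                    /* rounding bias */
        for (int i = 0; i < filter_length; i++)
            acc += src[sample_index + i] * (int)filter[i];
        acc >>= 15;                           /* psrad $15 */
        if (acc >  32767) acc =  32767;       /* packssdw saturation */
        if (acc < -32768) acc = -32768;
        return (int16_t)acc;
    }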
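
The LINEAR_CORE_* macros run the same source window against two filter phases at once, the current one and the one c->filter_alloc entries later, and hand both raw sums back in val and v2, presumably so the caller can blend them for linear interpolation between adjacent phases. A scalar sketch of that contract, again with an illustrative name:

    #include <stdint.h>

    /* Scalar sketch of LINEAR_CORE_INT16_*: two dot products over one
     * source window; val and v2 match the macros' output operands. */
    static void linear_core_int16_c(const int16_t *src, const int16_t *filter,
                                    int filter_alloc, int filter_length,
                                    int sample_index, int *val, int *v2)
    {
        int a = 0, b = 0;
        for (int i = 0; i < filter_length; i++) {
            a += src[sample_index + i] * (int)filter[i];
            b += src[sample_index + i] * (int)filter[i + filter_alloc];
        }
        *val = a;  /* movd %%xmm0, %1 */
        *v2  = b;  /* movd %%xmm2, %2 */
    }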
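
The rename from swri_resample_int16_ssse3 to swri_resample_int16_sse2 works because the two phaddd instructions were the only SSSE3 dependency: the replacement folds the four 32-bit partial sums with pshufd/paddd pairs, which are plain SSE2 (and phaddd tends to decode to multiple uops on common cores anyway, so little throughput is lost). The same idiom expressed with SSE2 intrinsics, function name illustrative:

    #include <emmintrin.h>  /* SSE2 */

    /* Horizontal sum of four 32-bit lanes, mirroring the pshufd/paddd
     * sequence in the diff above. */
    static int hsum_epi32_sse2(__m128i v)
    {
        /* pshufd $0x0E brings lanes 2,3 down: lane0+=lane2, lane1+=lane3 */
        __m128i t = _mm_shuffle_epi32(v, 0x0E);
        v = _mm_add_epi32(v, t);
        /* pshufd $0x01 brings lane 1 down: full sum lands in lane 0 */
        t = _mm_shuffle_epi32(v, 0x01);
        v = _mm_add_epi32(v, t);
        return _mm_cvtsi128_si32(v);          /* movd */
    }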