From: gnunet
Subject: [GNUnet-SVN] r7842 - Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386
Date: Mon, 3 Nov 2008 12:00:07 -0700 (MST)
Author: holindho
Date: 2008-11-03 12:00:03 -0700 (Mon, 03 Nov 2008)
New Revision: 7842
Added:
Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_template_ssse3.c
Removed:
Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_ssse3.c
Modified:
Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/h264dsp_mmx.c
Log:
revert r7835
Deleted:
Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_ssse3.c
===================================================================
--- Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_ssse3.c  2008-11-03 18:02:33 UTC (rev 7841)
+++ Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_ssse3.c  2008-11-03 19:00:03 UTC (rev 7842)
@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2008 Loren Merritt
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * SSSE3 optimized version of (put|avg)_h264_chroma_mc8.
- * H264_CHROMA_MC8_TMPL must be defined to the desired function name
- * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
- * AVG_OP must be defined to empty for put and the identity for avg
- *
- * This file used to be called "dsputil_h264_template_ssse3.c" but
- * that filename is too long for our build process.
- */
-static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, int rnd)
-{
- if(y==0 && x==0) {
- /* no filter needed */
- H264_CHROMA_MC8_MV0(dst, src, stride, h);
- return;
- }
-
- assert(x<8 && y<8 && x>=0 && y>=0);
-
- if(y==0 || x==0)
- {
- /* 1 dimensional filter only */
- asm volatile(
- "movd %0, %%xmm7 \n\t"
- "movq %1, %%xmm6 \n\t"
- "pshuflw $0, %%xmm7, %%xmm7 \n\t"
- "movlhps %%xmm6, %%xmm6 \n\t"
- "movlhps %%xmm7, %%xmm7 \n\t"
- :: "r"(255*(x+y)+8), "m"(*(rnd?&ff_pw_4:&ff_pw_3))
- );
-
- if(x) {
- asm volatile(
- "1: \n\t"
- "movq (%1), %%xmm0 \n\t"
- "movq 1(%1), %%xmm1 \n\t"
- "movq (%1,%3), %%xmm2 \n\t"
- "movq 1(%1,%3), %%xmm3 \n\t"
- "punpcklbw %%xmm1, %%xmm0 \n\t"
- "punpcklbw %%xmm3, %%xmm2 \n\t"
- "pmaddubsw %%xmm7, %%xmm0 \n\t"
- "pmaddubsw %%xmm7, %%xmm2 \n\t"
- AVG_OP("movq (%0), %%xmm4 \n\t")
- AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
- "paddw %%xmm6, %%xmm0 \n\t"
- "paddw %%xmm6, %%xmm2 \n\t"
- "psrlw $3, %%xmm0 \n\t"
- "psrlw $3, %%xmm2 \n\t"
- "packuswb %%xmm2, %%xmm0 \n\t"
- AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
- "movq %%xmm0, (%0) \n\t"
- "movhps %%xmm0, (%0,%3) \n\t"
- "sub $2, %2 \n\t"
- "lea (%1,%3,2), %1 \n\t"
- "lea (%0,%3,2), %0 \n\t"
- "jg 1b \n\t"
- :"+r"(dst), "+r"(src), "+r"(h)
- :"r"((x86_reg)stride)
- );
- } else {
- asm volatile(
- "1: \n\t"
- "movq (%1), %%xmm0 \n\t"
- "movq (%1,%3), %%xmm1 \n\t"
- "movdqa %%xmm1, %%xmm2 \n\t"
- "movq (%1,%3,2), %%xmm3 \n\t"
- "punpcklbw %%xmm1, %%xmm0 \n\t"
- "punpcklbw %%xmm3, %%xmm2 \n\t"
- "pmaddubsw %%xmm7, %%xmm0 \n\t"
- "pmaddubsw %%xmm7, %%xmm2 \n\t"
- AVG_OP("movq (%0), %%xmm4 \n\t")
- AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
- "paddw %%xmm6, %%xmm0 \n\t"
- "paddw %%xmm6, %%xmm2 \n\t"
- "psrlw $3, %%xmm0 \n\t"
- "psrlw $3, %%xmm2 \n\t"
- "packuswb %%xmm2, %%xmm0 \n\t"
- AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
- "movq %%xmm0, (%0) \n\t"
- "movhps %%xmm0, (%0,%3) \n\t"
- "sub $2, %2 \n\t"
- "lea (%1,%3,2), %1 \n\t"
- "lea (%0,%3,2), %0 \n\t"
- "jg 1b \n\t"
- :"+r"(dst), "+r"(src), "+r"(h)
- :"r"((x86_reg)stride)
- );
- }
- return;
- }
-
- /* general case, bilinear */
- asm volatile(
- "movd %0, %%xmm7 \n\t"
- "movd %1, %%xmm6 \n\t"
- "movdqa %2, %%xmm5 \n\t"
- "pshuflw $0, %%xmm7, %%xmm7 \n\t"
- "pshuflw $0, %%xmm6, %%xmm6 \n\t"
- "movlhps %%xmm7, %%xmm7 \n\t"
- "movlhps %%xmm6, %%xmm6 \n\t"
- :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y),
"m"(*(rnd?&ff_pw_32:&ff_pw_28))
- );
-
- asm volatile(
- "movq (%1), %%xmm0 \n\t"
- "movq 1(%1), %%xmm1 \n\t"
- "punpcklbw %%xmm1, %%xmm0 \n\t"
- "add %3, %1 \n\t"
- "1: \n\t"
- "movq (%1), %%xmm1 \n\t"
- "movq 1(%1), %%xmm2 \n\t"
- "movq (%1,%3), %%xmm3 \n\t"
- "movq 1(%1,%3), %%xmm4 \n\t"
- "lea (%1,%3,2), %1 \n\t"
- "punpcklbw %%xmm2, %%xmm1 \n\t"
- "punpcklbw %%xmm4, %%xmm3 \n\t"
- "movdqa %%xmm1, %%xmm2 \n\t"
- "movdqa %%xmm3, %%xmm4 \n\t"
- "pmaddubsw %%xmm7, %%xmm0 \n\t"
- "pmaddubsw %%xmm6, %%xmm1 \n\t"
- "pmaddubsw %%xmm7, %%xmm2 \n\t"
- "pmaddubsw %%xmm6, %%xmm3 \n\t"
- "paddw %%xmm5, %%xmm0 \n\t"
- "paddw %%xmm5, %%xmm2 \n\t"
- "paddw %%xmm0, %%xmm1 \n\t"
- "paddw %%xmm2, %%xmm3 \n\t"
- "movdqa %%xmm4, %%xmm0 \n\t"
- "psrlw $6, %%xmm1 \n\t"
- "psrlw $6, %%xmm3 \n\t"
- AVG_OP("movq (%0), %%xmm2 \n\t")
- AVG_OP("movhps (%0,%3), %%xmm2 \n\t")
- "packuswb %%xmm3, %%xmm1 \n\t"
- AVG_OP("pavgb %%xmm2, %%xmm1 \n\t")
- "movq %%xmm1, (%0)\n\t"
- "movhps %%xmm1, (%0,%3)\n\t"
- "sub $2, %2 \n\t"
- "lea (%0,%3,2), %0 \n\t"
- "jg 1b \n\t"
- :"+r"(dst), "+r"(src), "+r"(h)
- :"r"((x86_reg)stride)
- );
-}
-
-static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
-{
- asm volatile(
- "movd %0, %%mm7 \n\t"
- "movd %1, %%mm6 \n\t"
- "movq %2, %%mm5 \n\t"
- "pshufw $0, %%mm7, %%mm7 \n\t"
- "pshufw $0, %%mm6, %%mm6 \n\t"
- :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(ff_pw_32)
- );
-
- asm volatile(
- "movd (%1), %%mm0 \n\t"
- "punpcklbw 1(%1), %%mm0 \n\t"
- "add %3, %1 \n\t"
- "1: \n\t"
- "movd (%1), %%mm1 \n\t"
- "movd (%1,%3), %%mm3 \n\t"
- "punpcklbw 1(%1), %%mm1 \n\t"
- "punpcklbw 1(%1,%3), %%mm3 \n\t"
- "lea (%1,%3,2), %1 \n\t"
- "movq %%mm1, %%mm2 \n\t"
- "movq %%mm3, %%mm4 \n\t"
- "pmaddubsw %%mm7, %%mm0 \n\t"
- "pmaddubsw %%mm6, %%mm1 \n\t"
- "pmaddubsw %%mm7, %%mm2 \n\t"
- "pmaddubsw %%mm6, %%mm3 \n\t"
- "paddw %%mm5, %%mm0 \n\t"
- "paddw %%mm5, %%mm2 \n\t"
- "paddw %%mm0, %%mm1 \n\t"
- "paddw %%mm2, %%mm3 \n\t"
- "movq %%mm4, %%mm0 \n\t"
- "psrlw $6, %%mm1 \n\t"
- "psrlw $6, %%mm3 \n\t"
- "packuswb %%mm1, %%mm1 \n\t"
- "packuswb %%mm3, %%mm3 \n\t"
- AVG_OP("pavgb (%0), %%mm1 \n\t")
- AVG_OP("pavgb (%0,%3), %%mm3 \n\t")
- "movd %%mm1, (%0)\n\t"
- "movd %%mm3, (%0,%3)\n\t"
- "sub $2, %2 \n\t"
- "lea (%0,%3,2), %0 \n\t"
- "jg 1b \n\t"
- :"+r"(dst), "+r"(src), "+r"(h)
- :"r"((x86_reg)stride)
- );
-}
-
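The pmaddubsw sequences in the file above depend on a coefficient-packing trick. On the one-dimensional path only one of x, y is nonzero, so with d = x + y the filter needs the two byte taps (8 - d) and d; since 0 <= d <= 7, the constant 255*d + 8 equals 256*d + (8 - d), i.e. its low byte is (8 - d) and its high byte is d, exactly the pair pmaddubsw multiplies against adjacent pixels. The bilinear path scales the same packed pair by (8 - y) and by y, which stays carry-free because (8 - x)*(8 - y) <= 64. A minimal standalone C check of this packing (illustration only, not part of the commit):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Verify that 255*d + 8 packs the chroma taps (8-d, d) into one
 * 16-bit word, low byte first, as the movd/pshuflw broadcast expects. */
int main(void)
{
    for (int d = 0; d <= 7; d++) {
        uint16_t packed = (uint16_t)(255 * d + 8);
        assert((packed & 0xFF) == 8 - d);  /* tap for pixel i   */
        assert((packed >> 8) == d);        /* tap for pixel i+1 */
        printf("d=%d -> 0x%04x\n", d, packed);
    }
    return 0;
}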
Copied: Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_template_ssse3.c (from rev 7834, Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_template_ssse3.c)
===================================================================
--- Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_template_ssse3.c  (rev 0)
+++ Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/dsputil_h264_template_ssse3.c  2008-11-03 19:00:03 UTC (rev 7842)
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2008 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * SSSE3 optimized version of (put|avg)_h264_chroma_mc8.
+ * H264_CHROMA_MC8_TMPL must be defined to the desired function name
+ * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
+ * AVG_OP must be defined to empty for put and the identity for avg
+ */
+static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, int rnd)
+{
+ if(y==0 && x==0) {
+ /* no filter needed */
+ H264_CHROMA_MC8_MV0(dst, src, stride, h);
+ return;
+ }
+
+ assert(x<8 && y<8 && x>=0 && y>=0);
+
+ if(y==0 || x==0)
+ {
+ /* 1 dimensional filter only */
+ asm volatile(
+ "movd %0, %%xmm7 \n\t"
+ "movq %1, %%xmm6 \n\t"
+ "pshuflw $0, %%xmm7, %%xmm7 \n\t"
+ "movlhps %%xmm6, %%xmm6 \n\t"
+ "movlhps %%xmm7, %%xmm7 \n\t"
+ :: "r"(255*(x+y)+8), "m"(*(rnd?&ff_pw_4:&ff_pw_3))
+ );
+
+ if(x) {
+ asm volatile(
+ "1: \n\t"
+ "movq (%1), %%xmm0 \n\t"
+ "movq 1(%1), %%xmm1 \n\t"
+ "movq (%1,%3), %%xmm2 \n\t"
+ "movq 1(%1,%3), %%xmm3 \n\t"
+ "punpcklbw %%xmm1, %%xmm0 \n\t"
+ "punpcklbw %%xmm3, %%xmm2 \n\t"
+ "pmaddubsw %%xmm7, %%xmm0 \n\t"
+ "pmaddubsw %%xmm7, %%xmm2 \n\t"
+ AVG_OP("movq (%0), %%xmm4 \n\t")
+ AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
+ "paddw %%xmm6, %%xmm0 \n\t"
+ "paddw %%xmm6, %%xmm2 \n\t"
+ "psrlw $3, %%xmm0 \n\t"
+ "psrlw $3, %%xmm2 \n\t"
+ "packuswb %%xmm2, %%xmm0 \n\t"
+ AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
+ "movq %%xmm0, (%0) \n\t"
+ "movhps %%xmm0, (%0,%3) \n\t"
+ "sub $2, %2 \n\t"
+ "lea (%1,%3,2), %1 \n\t"
+ "lea (%0,%3,2), %0 \n\t"
+ "jg 1b \n\t"
+ :"+r"(dst), "+r"(src), "+r"(h)
+ :"r"((x86_reg)stride)
+ );
+ } else {
+ asm volatile(
+ "1: \n\t"
+ "movq (%1), %%xmm0 \n\t"
+ "movq (%1,%3), %%xmm1 \n\t"
+ "movdqa %%xmm1, %%xmm2 \n\t"
+ "movq (%1,%3,2), %%xmm3 \n\t"
+ "punpcklbw %%xmm1, %%xmm0 \n\t"
+ "punpcklbw %%xmm3, %%xmm2 \n\t"
+ "pmaddubsw %%xmm7, %%xmm0 \n\t"
+ "pmaddubsw %%xmm7, %%xmm2 \n\t"
+ AVG_OP("movq (%0), %%xmm4 \n\t")
+ AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
+ "paddw %%xmm6, %%xmm0 \n\t"
+ "paddw %%xmm6, %%xmm2 \n\t"
+ "psrlw $3, %%xmm0 \n\t"
+ "psrlw $3, %%xmm2 \n\t"
+ "packuswb %%xmm2, %%xmm0 \n\t"
+ AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
+ "movq %%xmm0, (%0) \n\t"
+ "movhps %%xmm0, (%0,%3) \n\t"
+ "sub $2, %2 \n\t"
+ "lea (%1,%3,2), %1 \n\t"
+ "lea (%0,%3,2), %0 \n\t"
+ "jg 1b \n\t"
+ :"+r"(dst), "+r"(src), "+r"(h)
+ :"r"((x86_reg)stride)
+ );
+ }
+ return;
+ }
+
+ /* general case, bilinear */
+ asm volatile(
+ "movd %0, %%xmm7 \n\t"
+ "movd %1, %%xmm6 \n\t"
+ "movdqa %2, %%xmm5 \n\t"
+ "pshuflw $0, %%xmm7, %%xmm7 \n\t"
+ "pshuflw $0, %%xmm6, %%xmm6 \n\t"
+ "movlhps %%xmm7, %%xmm7 \n\t"
+ "movlhps %%xmm6, %%xmm6 \n\t"
+ :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y),
"m"(*(rnd?&ff_pw_32:&ff_pw_28))
+ );
+
+ asm volatile(
+ "movq (%1), %%xmm0 \n\t"
+ "movq 1(%1), %%xmm1 \n\t"
+ "punpcklbw %%xmm1, %%xmm0 \n\t"
+ "add %3, %1 \n\t"
+ "1: \n\t"
+ "movq (%1), %%xmm1 \n\t"
+ "movq 1(%1), %%xmm2 \n\t"
+ "movq (%1,%3), %%xmm3 \n\t"
+ "movq 1(%1,%3), %%xmm4 \n\t"
+ "lea (%1,%3,2), %1 \n\t"
+ "punpcklbw %%xmm2, %%xmm1 \n\t"
+ "punpcklbw %%xmm4, %%xmm3 \n\t"
+ "movdqa %%xmm1, %%xmm2 \n\t"
+ "movdqa %%xmm3, %%xmm4 \n\t"
+ "pmaddubsw %%xmm7, %%xmm0 \n\t"
+ "pmaddubsw %%xmm6, %%xmm1 \n\t"
+ "pmaddubsw %%xmm7, %%xmm2 \n\t"
+ "pmaddubsw %%xmm6, %%xmm3 \n\t"
+ "paddw %%xmm5, %%xmm0 \n\t"
+ "paddw %%xmm5, %%xmm2 \n\t"
+ "paddw %%xmm0, %%xmm1 \n\t"
+ "paddw %%xmm2, %%xmm3 \n\t"
+ "movdqa %%xmm4, %%xmm0 \n\t"
+ "psrlw $6, %%xmm1 \n\t"
+ "psrlw $6, %%xmm3 \n\t"
+ AVG_OP("movq (%0), %%xmm2 \n\t")
+ AVG_OP("movhps (%0,%3), %%xmm2 \n\t")
+ "packuswb %%xmm3, %%xmm1 \n\t"
+ AVG_OP("pavgb %%xmm2, %%xmm1 \n\t")
+ "movq %%xmm1, (%0)\n\t"
+ "movhps %%xmm1, (%0,%3)\n\t"
+ "sub $2, %2 \n\t"
+ "lea (%0,%3,2), %0 \n\t"
+ "jg 1b \n\t"
+ :"+r"(dst), "+r"(src), "+r"(h)
+ :"r"((x86_reg)stride)
+ );
+}
+
+static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+ asm volatile(
+ "movd %0, %%mm7 \n\t"
+ "movd %1, %%mm6 \n\t"
+ "movq %2, %%mm5 \n\t"
+ "pshufw $0, %%mm7, %%mm7 \n\t"
+ "pshufw $0, %%mm6, %%mm6 \n\t"
+ :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(ff_pw_32)
+ );
+
+ asm volatile(
+ "movd (%1), %%mm0 \n\t"
+ "punpcklbw 1(%1), %%mm0 \n\t"
+ "add %3, %1 \n\t"
+ "1: \n\t"
+ "movd (%1), %%mm1 \n\t"
+ "movd (%1,%3), %%mm3 \n\t"
+ "punpcklbw 1(%1), %%mm1 \n\t"
+ "punpcklbw 1(%1,%3), %%mm3 \n\t"
+ "lea (%1,%3,2), %1 \n\t"
+ "movq %%mm1, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pmaddubsw %%mm7, %%mm0 \n\t"
+ "pmaddubsw %%mm6, %%mm1 \n\t"
+ "pmaddubsw %%mm7, %%mm2 \n\t"
+ "pmaddubsw %%mm6, %%mm3 \n\t"
+ "paddw %%mm5, %%mm0 \n\t"
+ "paddw %%mm5, %%mm2 \n\t"
+ "paddw %%mm0, %%mm1 \n\t"
+ "paddw %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm0 \n\t"
+ "psrlw $6, %%mm1 \n\t"
+ "psrlw $6, %%mm3 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ "packuswb %%mm3, %%mm3 \n\t"
+ AVG_OP("pavgb (%0), %%mm1 \n\t")
+ AVG_OP("pavgb (%0,%3), %%mm3 \n\t")
+ "movd %%mm1, (%0)\n\t"
+ "movd %%mm3, (%0,%3)\n\t"
+ "sub $2, %2 \n\t"
+ "lea (%0,%3,2), %0 \n\t"
+ "jg 1b \n\t"
+ :"+r"(dst), "+r"(src), "+r"(h)
+ :"r"((x86_reg)stride)
+ );
+}
+
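The template above implements standard H.264 chroma motion compensation: every output pixel is a bilinear blend of a 2x2 source neighbourhood with eighth-pel weights A = (8-x)(8-y), B = x(8-y), C = (8-x)y, D = xy, rounded by 32 (or 28 when rnd is 0) and shifted right by 6, since A+B+C+D = 64. A plain-C reference of the 8-wide put case, given only to make the vector code easier to follow (a sketch with an illustrative name, not part of the commit):

#include <stdint.h>

/* Scalar reference for 8-pixel-wide bilinear chroma interpolation
 * with 1/8-pel offsets x, y in [0,7]. */
static void chroma_mc8_put_ref(uint8_t *dst, const uint8_t *src,
                               int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (A * src[j] + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      32) >> 6;
        dst += stride;
        src += stride;
    }
}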
Modified:
Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/h264dsp_mmx.c
===================================================================
--- Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/h264dsp_mmx.c  2008-11-03 18:02:33 UTC (rev 7841)
+++ Extractor/src/plugins/thumbnailffmpeg/ffmpeg/libavcodec/i386/h264dsp_mmx.c  2008-11-03 19:00:03 UTC (rev 7842)
@@ -1985,7 +1985,7 @@
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
-#include "dsputil_h264_ssse3.c"
+#include "dsputil_h264_template_ssse3.c"
static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
@@ -2003,7 +2003,7 @@
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
-#include "dsputil_h264_ssse3.c"
+#include "dsputil_h264_template_ssse3.c"
static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
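The h264dsp_mmx.c hunks show why the template file has no include guard: it is compiled once per variant, with the macros set up before each #include. Reduced to its skeleton (the AVG_OP definitions are inferred from the template's comment and are not visible in the hunks above):

/* put variant: AVG_OP expands to nothing, so the destination is
 * never read and the result is simply stored. */
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC8_MV0  put_pixels8_mmx
#define AVG_OP(X)
#include "dsputil_h264_template_ssse3.c"
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC8_MV0
#undef AVG_OP

/* avg variant: AVG_OP is the identity, so the pavgb instructions are
 * emitted and the result is averaged with the existing destination. */
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC8_MV0  avg_pixels8_mmx2
#define AVG_OP(X) X
#include "dsputil_h264_template_ssse3.c"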