#include "dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

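/* H264_MC(OPNAME, SIZE, CODETYPE) expands to the 16 quarter-pel motion
 * compensation functions (mc00 .. mc33) for one block size: mc00 is a plain
 * copy, mc20/mc02/mc22 are the pure 6-tap h/v/hv lowpass results, and the
 * remaining quarter-pel positions average two of those intermediates. */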
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

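/* 8-wide no-rounding chroma motion compensation: bilinear interpolation with
 * the weights (8-x)(8-y), x(8-y), (8-x)y, xy and a bias of 28 (instead of the
 * usual 32) before the >>6.  The loads assume stride is a multiple of 16, so
 * the source alignment stays constant from row to row. */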
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
        {((8 - x) * (8 - y)),
         ((x) * (8 - y)),
         ((8 - x) * (y)),
         ((x) * (y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16_t v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
                              0x14, 0x15, 0x16, 0x17,
                              0x08, 0x09, 0x0A, 0x0B,
                              0x0C, 0x0D, 0x0E, 0x0F);
    } else {
        fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
                              0x04, 0x05, 0x06, 0x07,
                              0x18, 0x19, 0x1A, 0x1B,
                              0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc1uc);

    if (!loadSecond) {
        for (i = 0 ; i < h ; i++) {

            vsrcCuc = vec_ld(stride + 0, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

            vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
            vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sra(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8_t)vec_packsu(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    } else {
        vec_u8_t vsrcDuc;
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);
            vsrcDuc = vec_ld(stride + 16, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
            if (reallyBadAlign)
                vsrc3uc = vsrcDuc;
            else
                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

            vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
            vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sr(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8_t)vec_pack(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    }
}

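/* Store the byte-wise average of two 16-byte-wide sources into dst.
 * src1 and dst may be unaligned (handled with lvsl/lvsr permutes); src2 is
 * read with a fixed stride of 16, i.e. a packed SIZE x SIZE intermediate. */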
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                            const uint8_t * src2, int dst_stride,
                                            int src_stride1, int h)
{
    int i;
    vec_u8_t a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

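/* Same as put_pixels16_l2_altivec, but the (src1,src2) average is in turn
 * averaged with the existing dst pixels (the "avg_" motion compensation op). */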
static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                            const uint8_t * src2, int dst_stride,
                                            int src_stride1, int h)
{
    int i;
    vec_u8_t a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)

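/* One 1-D pass of the 4x4 H.264 inverse transform: the z0..z3 butterfly with
 * the >>1 applied to the odd inputs, operating on four vectors at once. */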
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3) \
    vz0 = vec_add(vb0,vb2); \
    vz1 = vec_sub(vb0,vb2); \
    vz2 = vec_sra(vb1,vec_splat_u16(1)); \
    vz2 = vec_sub(vz2,vb3); \
    vz3 = vec_sra(vb3,vec_splat_u16(1)); \
    vz3 = vec_add(vb1,vz3); \
    \
    va0 = vec_add(vz0,vz3); \
    va1 = vec_add(vz1,vz2); \
    va2 = vec_sub(vz1,vz2); \
    va3 = vec_sub(vz0,vz3)

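/* 4x4 transpose of the low four 16-bit elements of a0..a3 into b0..b3;
 * the upper halves of the result vectors are don't-care. */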
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

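/* Load 4 bytes from dst, add the reconstructed residual row in va with
 * unsigned saturation, and store the 4 bytes back via vec_ste. */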
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va) \
    vdst_orig = vec_ld(0, dst); \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16_t) vec_mergeh(zero_u8v, vdst); \
    va = vec_add(va, vdst_ss); \
    va_u8 = vec_packsu(va, zero_s16v); \
    va_u32 = vec_splat((vec_u32_t)va_u8, 0); \
    vec_ste(va_u32, element, (uint32_t*)dst);

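/* 4x4 H.264 inverse transform plus add to dst: block[0] gets the +32
 * rounding bias, two VEC_1D_DCT passes run with a transpose in between,
 * and each row is shifted by 6 and added to the destination pixels. */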
static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16_t va0, va1, va2, va3;
    vec_s16_t vz0, vz1, vz2, vz3;
    vec_s16_t vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8_t va_u8;
    vec_u32_t va_u32;
    vec_s16_t vdst_ss;
    const vec_u16_t v6us = vec_splat_u16(6);
    vec_u8_t vdst, vdst_orig;
    vec_u8_t vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

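/* One 1-D pass of the 8x8 H.264 inverse transform: even-part butterflies
 * (a0/a2/a4/a6 -> b0/b2/b4/b6), odd-part butterflies (a1/a3/a5/a7 ->
 * b1/b3/b5/b7), then the final combination into d0..d7. */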
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    vec_s16_t a0v = vec_add(s0, s4); \
    vec_s16_t a2v = vec_sub(s0, s4); \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
    vec_s16_t b0v = vec_add(a0v, a6v); \
    vec_s16_t b2v = vec_add(a2v, a4v); \
    vec_s16_t b4v = vec_sub(a2v, a4v); \
    vec_s16_t b6v = vec_sub(a0v, a6v); \
    \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
    vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    \
    d0 = vec_add(b0v, b7v); \
    d1 = vec_add(b2v, b5v); \
    d2 = vec_add(b4v, b3v); \
    d3 = vec_add(b6v, b1v); \
    d4 = vec_sub(b6v, b1v); \
    d5 = vec_sub(b4v, b3v); \
    d6 = vec_sub(b2v, b5v); \
    d7 = vec_sub(b0v, b7v); \
}

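/* Add one row of IDCT output (>>6) to 8 destination pixels with unsigned
 * saturation.  The load/select/store sequence leaves the 8 bytes outside the
 * block untouched even when dest is not 16-byte aligned. */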
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    vec_u8_t hv = vec_ld( 0, dest ); \
    vec_u8_t lv = vec_ld( 7, dest ); \
    vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv ); \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv); \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
    vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum); \
    vec_u8_t edgehv; \
    \
    vec_u8_t bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
    vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
    lv = vec_sel( lv, bodyv, edgelv ); \
    vec_st( lv, 7, dest ); \
    hv = vec_ld( 0, dest ); \
    edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
    hv = vec_sel( hv, bodyv, edgehv ); \
    vec_st( hv, 0, dest ); \
}

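/* 8x8 H.264 inverse transform plus add: +32 bias on dct[0], two IDCT8_1D
 * passes with a TRANSPOSE8 in between, then a per-row saturated add to dst. */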
void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8_t perm_ldv = vec_lvsl(0, dst);
    vec_u8_t perm_stv = vec_lvsr(8, dst);

    const vec_u16_t onev = vec_splat_u16(1);
    const vec_u16_t twov = vec_splat_u16(2);
    const vec_u16_t sixv = vec_splat_u16(6);

    const vec_u8_t sel = (vec_u8_t) AVV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);
    LOAD_ZERO;

    dct[0] += 32;

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

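/* Interleave four 16-byte vectors so that write16x4() can store them as
 * sixteen 4-byte rows (a 4x16 -> 16x4 transpose). */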
#define transpose4x16(r0, r1, r2, r3) { \
    register vec_u8_t r4; \
    register vec_u8_t r5; \
    register vec_u8_t r6; \
    register vec_u8_t r7; \
    \
    r4 = vec_mergeh(r0, r2); \
    r5 = vec_mergel(r0, r2); \
    r6 = vec_mergeh(r1, r3); \
    r7 = vec_mergel(r1, r3); \
    \
    r0 = vec_mergeh(r4, r6); \
    r1 = vec_mergel(r4, r6); \
    r2 = vec_mergeh(r5, r7); \
    r3 = vec_mergel(r5, r7); \
}

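/* Scatter four transposed 16-byte vectors as sixteen 4-byte rows into dst.
 * The vectors are staged through an aligned buffer and copied out as 32-bit
 * words, so dst must be 4-byte aligned and dst_stride a multiple of 4. */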
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8_t r0, register vec_u8_t r1,
                             register vec_u8_t r2, register vec_u8_t r3) {
    DECLARE_ALIGNED_16(unsigned char, result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);

    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8_t r0  = unaligned_load(0,             src); \
    register vec_u8_t r1  = unaligned_load(   src_stride, src); \
    register vec_u8_t r2  = unaligned_load(2* src_stride, src); \
    register vec_u8_t r3  = unaligned_load(3* src_stride, src); \
    register vec_u8_t r4  = unaligned_load(4* src_stride, src); \
    register vec_u8_t r5  = unaligned_load(5* src_stride, src); \
    register vec_u8_t r6  = unaligned_load(6* src_stride, src); \
    register vec_u8_t r7  = unaligned_load(7* src_stride, src); \
    register vec_u8_t r14 = unaligned_load(14*src_stride, src); \
    register vec_u8_t r15 = unaligned_load(15*src_stride, src); \
    \
    r8  = unaligned_load( 8*src_stride, src); \
    r9  = unaligned_load( 9*src_stride, src); \
    r10 = unaligned_load(10*src_stride, src); \
    r11 = unaligned_load(11*src_stride, src); \
    r12 = unaligned_load(12*src_stride, src); \
    r13 = unaligned_load(13*src_stride, src); \
    \
    r0 = vec_mergeh(r0, r8); \
    r1 = vec_mergeh(r1, r9); \
    r2 = vec_mergeh(r2, r10); \
    r3 = vec_mergeh(r3, r11); \
    r4 = vec_mergeh(r4, r12); \
    r5 = vec_mergeh(r5, r13); \
    r6 = vec_mergeh(r6, r14); \
    r7 = vec_mergeh(r7, r15); \
    \
    r8  = vec_mergeh(r0, r4); \
    r9  = vec_mergel(r0, r4); \
    r10 = vec_mergeh(r1, r5); \
    r11 = vec_mergel(r1, r5); \
    r12 = vec_mergeh(r2, r6); \
    r13 = vec_mergel(r2, r6); \
    r14 = vec_mergeh(r3, r7); \
    r15 = vec_mergel(r3, r7); \
    \
    r0 = vec_mergeh(r8, r12); \
    r1 = vec_mergel(r8, r12); \
    r2 = vec_mergeh(r9, r13); \
    r4 = vec_mergeh(r10, r14); \
    r5 = vec_mergel(r10, r14); \
    r6 = vec_mergeh(r11, r15); \
    \
    r8  = vec_mergeh(r0, r4); \
    r9  = vec_mergel(r0, r4); \
    r10 = vec_mergeh(r1, r5); \
    r11 = vec_mergel(r1, r5); \
    r12 = vec_mergeh(r2, r6); \
    r13 = vec_mergel(r2, r6); \
}

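/* Per-byte test |x - y| < a, computed with saturating subtractions in both
 * directions; returns 0xFF where the condition holds, 0x00 elsewhere. */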
static inline vec_u8_t diff_lt_altivec ( register vec_u8_t x,
                                         register vec_u8_t y,
                                         register vec_u8_t a) {

    register vec_u8_t diff = vec_subs(x, y);
    register vec_u8_t diffneg = vec_subs(y, x);
    register vec_u8_t o = vec_or(diff, diffneg);
    o = (vec_u8_t)vec_cmplt(o, a);
    return o;
}

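/* Build the deblocking filter mask: 0xFF per pixel where
 * |p0-q0| < alpha && |p1-p0| < beta && |q1-q0| < beta. */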
static inline vec_u8_t h264_deblock_mask ( register vec_u8_t p0,
                                           register vec_u8_t p1,
                                           register vec_u8_t q0,
                                           register vec_u8_t q1,
                                           register vec_u8_t alpha,
                                           register vec_u8_t beta) {

    register vec_u8_t mask;
    register vec_u8_t tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

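/* Compute a filtered p1 (or q1) candidate: (p2 + avg(p0, q0)) >> 1, using
 * the xor trick for exact floor rounding, then clip it to [p1-tc0, p1+tc0]. */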
static inline vec_u8_t h264_deblock_q1(register vec_u8_t p0,
                                       register vec_u8_t p1,
                                       register vec_u8_t p2,
                                       register vec_u8_t q0,
                                       register vec_u8_t tc0) {

    register vec_u8_t average = vec_avg(p0, q0);
    register vec_u8_t temp;
    register vec_u8_t uncliped;
    register vec_u8_t ones;
    register vec_u8_t max;
    register vec_u8_t min;
    register vec_u8_t newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);
    uncliped = vec_subs(average, temp);
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

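/* Update p0 and q0 in place: a delta term (roughly the spec's
 * ((q0-p0)*4 + (p1-q1) + 4) >> 3) is computed with saturating averages
 * around the bias 160, split into positive/negative halves, clipped to the
 * per-pixel tc0 thresholds, then added to p0 and subtracted from q0. */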
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) { \
    const vec_u8_t A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \
    \
    register vec_u8_t pq0bit = vec_xor(p0,q0); \
    register vec_u8_t q1minus; \
    register vec_u8_t p0minus; \
    register vec_u8_t stage1; \
    register vec_u8_t stage2; \
    register vec_u8_t vec160; \
    register vec_u8_t delta; \
    register vec_u8_t deltaneg; \
    \
    q1minus = vec_nor(q1, q1); \
    stage1 = vec_avg(p1, q1minus); \
    stage2 = vec_sr(stage1, vec_splat_u8(1)); \
    p0minus = vec_nor(p0, p0); \
    stage1 = vec_avg(q0, p0minus); \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1)); \
    stage2 = vec_avg(stage2, pq0bit); \
    stage2 = vec_adds(stage2, stage1); \
    vec160 = vec_ld(0, &A0v); \
    deltaneg = vec_subs(vec160, stage2); \
    delta = vec_subs(stage2, vec160); \
    deltaneg = vec_min(tc0masked, deltaneg); \
    delta = vec_min(tc0masked, delta); \
    p0 = vec_subs(p0, deltaneg); \
    q0 = vec_subs(q0, delta); \
    p0 = vec_adds(p0, delta); \
    q0 = vec_adds(q0, deltaneg); \
}

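/* Normal-strength luma deblocking of 16 pixels along one edge: build the
 * alpha/beta mask, expand the four tc0 values to per-pixel thresholds,
 * optionally filter p1/q1 (extending tc0 by one where that happens), then
 * apply the p0/q0 update. */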
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
    DECLARE_ALIGNED_16(unsigned char, temp[16]); \
    register vec_u8_t alphavec; \
    register vec_u8_t betavec; \
    register vec_u8_t mask; \
    register vec_u8_t p1mask; \
    register vec_u8_t q1mask; \
    register vector signed char tc0vec; \
    register vec_u8_t finaltc0; \
    register vec_u8_t tc0masked; \
    register vec_u8_t newp1; \
    register vec_u8_t newq1; \
    \
    temp[0] = alpha; \
    temp[1] = beta; \
    alphavec = vec_ld(0, temp); \
    betavec = vec_splat(alphavec, 0x1); \
    alphavec = vec_splat(alphavec, 0x0); \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); \
    \
    *((int *)temp) = *((int *)tc0); \
    tc0vec = vec_ld(0, (signed char*)temp); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1))); \
    finaltc0 = vec_and((vec_u8_t)tc0vec, mask); \
    \
    p1mask = diff_lt_altivec(p2, p0, betavec); \
    p1mask = vec_and(p1mask, mask); \
    tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec); \
    finaltc0 = vec_sub(finaltc0, p1mask); \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \
    \
    q1mask = diff_lt_altivec(q2, q0, betavec); \
    q1mask = vec_and(q1mask, mask); \
    tc0masked = vec_and(q1mask, (vec_u8_t)tc0vec); \
    finaltc0 = vec_sub(finaltc0, q1mask); \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \
    \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0); \
    p1 = newp1; \
    q1 = newq1; \
}

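/* Filter across a horizontal edge: the six 16-pixel rows around the edge are
 * loaded whole (pix and stride are assumed to keep them 16-byte aligned),
 * filtered, and the four modified rows stored back.  The filter is skipped
 * only when all four tc0 values are negative. */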
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8_t p2 = vec_ld(-3*stride, pix);
        register vec_u8_t p1 = vec_ld(-2*stride, pix);
        register vec_u8_t p0 = vec_ld(-1*stride, pix);
        register vec_u8_t q0 = vec_ld(0, pix);
        register vec_u8_t q1 = vec_ld(stride, pix);
        register vec_u8_t q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

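/* Filter across a vertical edge: transpose 16x6 pixels into column vectors,
 * run the same 16-wide filter, then transpose the four modified columns back
 * with transpose4x16()/write16x4(). */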
static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8_t line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

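/* Hook the AltiVec implementations into the DSPContext when AltiVec is
 * available at runtime; the dspfunc() helper fills one 16-entry qpel table. */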
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

    if (has_altivec()) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
        c->h264_idct_add = ff_h264_idct_add_altivec;
        c->h264_idct8_add = ff_h264_idct8_add_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
    }
}