#include "dsputil.h"

#include "gcc_fixes.h"
#include "dsputil_altivec.h"
#include "snow.h"

#undef NDEBUG
#include <assert.h>

#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))
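
/* slice_buffer_get_line() returns the cached pointer when the requested
 * line is already resident and only falls back to slice_buffer_load_line()
 * on a cache miss, so the common case costs a single pointer test.
 * A usage sketch (assuming a slice_buffer the Snow decoder has set up):
 *
 *     DWTELEM *line = slice_buffer_get_line(sb, src_y + y);
 */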

static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
{
    DWTELEM * buffer;

    assert(buf->data_stack_top >= 0);

    if (buf->line[line])
        return buf->line[line];

    /* take a free line buffer off the stack and cache it for this line */
    buffer = buf->data_stack[buf->data_stack_top];
    buf->data_stack_top--;
    buf->line[line] = buffer;

    return buffer;
}

void ff_snow_horizontal_compose97i_altivec(IDWTELEM *b, int width)
{
#if 0
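    /* NOTE: this body is compiled out.  It was written for the old 32-bit
     * DWTELEM layout (hence the DWTELEM declarations and vector signed int
     * casts below), while the function now receives 16-bit IDWTELEM data;
     * the half-finished 16-bit conversion mixes 16- and 32-bit element
     * intrinsics and must not be enabled as-is. */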
    const int w2= (width+1)>>1;
    DECLARE_ALIGNED_16(IDWTELEM, temp[(width>>1)]);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;
    vector signed short t1, t2, x, y, tmp1, tmp2;
    vector signed short *vbuf, *vtmp;
    vector unsigned char align;

    {
        IDWTELEM * const ref = b + w2 - 1;
        IDWTELEM b_0 = b[0];
        vector signed short v7 = vec_splat_s16(7);
        vbuf = (vector signed short *)b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1 = vec_perm(tmp1, tmp2, align);

        for (i=0; i<w_l-15; i+=16) {
#if 0
            b[i+0] = b[i+0] + ((7 * (ref[i+0] + ref[i+1])-1) >> 8);
#else
            tmp1 = vec_ld (0, ref+8+i);
            tmp2 = vec_ld (15, ref+8+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1, vec_sld(t1,t2,2));

            tmp1 = vec_ld (0, ref+12+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+12+i);

            *vbuf = vec_sub(*vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            tmp1 = vec_ld (0, ref+12+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+12+i);

            *vbuf = vec_sub(*vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            tmp1 = vec_ld (0, ref+16+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+16+i);

            *vbuf = vec_sub(*vbuf, y);

            t1 = t2;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            vbuf++;

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));
            *vbuf = vec_sub(*vbuf, y);

            t1 = t2;

            vbuf++;
#endif
        }

        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    {
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }

        align = vec_lvsl(0, b+i);
        tmp1 = vec_ld(0, b+i);
        vbuf = (vector signed int*) (dst + i);
        tmp2 = vec_ld(15, b+i);

        t1 = vec_perm(tmp1, tmp2, align);

        for (; i<w_r-3; i+=4) {
#if 0
            dst[i]   = dst[i]   - (b[i]   + b[i + 1]);
            dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
            dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
            dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
#else
            tmp1 = vec_ld(0, b+4+i);
            tmp2 = vec_ld(15, b+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1, vec_sld(t1,t2,4));
            *vbuf = vec_sub(*vbuf, y);

            vbuf++;

            t1 = t2;
#endif
        }

        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    {
        DWTELEM * const ref = b+w2 - 1;
        DWTELEM b_0 = b[0];
        vbuf = (vector signed int *) b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1 = vec_perm(tmp1, tmp2, align);

        i = 0;
        for (; i<w_l-15; i+=16) {
#if 0
            b[i]   = b[i]   - (((8 -(ref[i]   + ref[i+1])) - (b[i]  <<2)) >> 4);
            b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4);
            b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4);
            b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4);
#else
            tmp1 = vec_ld (0, ref+4+i);
            tmp2 = vec_ld (15, ref+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+8+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+8+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+12+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+12+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+16+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+16+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            t1 = t2;

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
            *vbuf = vec_sub( *vbuf, y);

            vbuf++;
#endif
        }

        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
    }

    {
        DWTELEM * const src = b+w2;

        vbuf = (vector signed int *)b;
        vtmp = (vector signed int *)temp;

        i = 0;
        align = vec_lvsl(0, src);

        for (; i<w_r-3; i+=4) {
#if 0
            temp[i]   = src[i]   - ((-3*(b[i]   + b[i+1]))>>1);
            temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
            temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
            temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
#else
            tmp1 = vec_ld(0,src+i);
            t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
            tmp2 = vec_ld(15,src+i);
            t1 = vec_sub(vec_splat_s32(0),t1);
            t1 = vec_add(t1,vec_add(t1,t1));
            t2 = vec_perm(tmp1 ,tmp2 ,align);
            t1 = vec_sra(t1,vec_splat_u32(1));
            vbuf++;
            *vtmp = vec_sub(t2,t1);
            vtmp++;
#endif
        }

        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
    }

    {
        int a;
        vector signed int *t = (vector signed int *)temp,
                          *v = (vector signed int *)b;

        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0xE) != 0xE; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=14; i>=0; i-=16){
            a = i/4;

            v[a+3] = vec_mergel(v[(a>>1)+1], t[(a>>1)+1]);
            v[a+2] = vec_mergeh(v[(a>>1)+1], t[(a>>1)+1]);
            v[a+1] = vec_mergel(v[a>>1], t[a>>1]);
            v[a]   = vec_mergeh(v[a>>1], t[a>>1]);
        }
    }
#endif
}

void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
{
    int i, w4 = width/4;
    vector signed int *v0, *v1, *v2, *v3, *v4, *v5;
    vector signed int t1, t2;

    v0 = (vector signed int *)b0;
    v1 = (vector signed int *)b1;
    v2 = (vector signed int *)b2;
    v3 = (vector signed int *)b3;
    v4 = (vector signed int *)b4;
    v5 = (vector signed int *)b5;

    for (i=0; i< w4; i++) {
#if 0
        b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
        b3[i] -= ((b2[i] + b4[i]));
        b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
        b1[i] += (3*(b0[i] + b2[i]))>>1;
#else
        t1 = vec_add(v3[i], v5[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        t1 = vec_add(t2, vec_splat_s32(4));
        v4[i] = vec_sub(v4[i], vec_sra(t1, vec_splat_u32(3)));

        v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));

        t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
        t2 = vec_sl(v2[i], vec_splat_u32(2));
        v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2), vec_splat_u32(4)));

        t1 = vec_add(v0[i], v2[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        v1[i] = vec_add(v1[i], vec_sra(t2, vec_splat_u32(1)));
#endif
    }
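
    /* scalar tail: width need not be a multiple of 4, so finish the last
     * up-to-3 columns with the generic lifting expressions */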
    for (i*=4; i < width; i++) {
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
}
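
/* vec_ld() can only load from 16-byte aligned addresses, so each (possibly
 * misaligned) source row below is fetched with two aligned loads at offsets
 * 0 and 15 and realigned with the classic vec_lvsl()/vec_perm() idiom. */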
#define LOAD_BLOCKS \
        tmp1 = vec_ld(0, &block[3][y*src_stride]);\
        align = vec_lvsl(0, &block[3][y*src_stride]);\
        tmp2 = vec_ld(15, &block[3][y*src_stride]);\
\
        b3 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, &block[2][y*src_stride]);\
        align = vec_lvsl(0, &block[2][y*src_stride]);\
        tmp2 = vec_ld(15, &block[2][y*src_stride]);\
\
        b2 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, &block[1][y*src_stride]);\
        align = vec_lvsl(0, &block[1][y*src_stride]);\
        tmp2 = vec_ld(15, &block[1][y*src_stride]);\
\
        b1 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, &block[0][y*src_stride]);\
        align = vec_lvsl(0, &block[0][y*src_stride]);\
        tmp2 = vec_ld(15, &block[0][y*src_stride]);\
\
        b0 = vec_perm(tmp1,tmp2,align);

#define LOAD_OBMCS \
        tmp1 = vec_ld(0, obmc1);\
        align = vec_lvsl(0, obmc1);\
        tmp2 = vec_ld(15, obmc1);\
\
        ob1 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, obmc2);\
        align = vec_lvsl(0, obmc2);\
        tmp2 = vec_ld(15, obmc2);\
\
        ob2 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, obmc3);\
        align = vec_lvsl(0, obmc3);\
        tmp2 = vec_ld(15, obmc3);\
\
        ob3 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, obmc4);\
        align = vec_lvsl(0, obmc4);\
        tmp2 = vec_ld(15, obmc4);\
\
        ob4 = vec_perm(tmp1,tmp2,align);
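
/* STEPS_0_1 / STEPS_2_3 compute the 4-tap OBMC sum for each output pixel.
 * The four 8-bit OBMC weights and the four 8-bit source pixels belonging to
 * one destination position are interleaved with vec_mergeh()/vec_mergel()
 * and then multiplied and accumulated in one go by vec_msum(), yielding a
 * 32-bit sum per pixel.  STEPS_0_1 produces the low eight pixels of the row
 * (v[0], v[1]); STEPS_2_3 the high eight (v[2], v[3]) for b_w == 16. */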
#define STEPS_0_1\
        h1 = (vector unsigned short)\
            vec_mergeh(ob1, ob2);\
\
        h2 = (vector unsigned short)\
            vec_mergeh(ob3, ob4);\
\
        ih = (vector unsigned char)\
            vec_mergeh(h1,h2);\
\
        l1 = (vector unsigned short) vec_mergeh(b3, b2);\
\
        ih1 = (vector unsigned char) vec_mergel(h1, h2);\
\
        l2 = (vector unsigned short) vec_mergeh(b1, b0);\
\
        il = (vector unsigned char) vec_mergeh(l1, l2);\
\
        v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
\
        il1 = (vector unsigned char) vec_mergel(l1, l2);\
\
        v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
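
/* FINAL_STEP_SCALAR adds the rounded OBMC prediction to the residual and,
 * when "add" is set, stores 8-bit output: the branchless clamp rewrites any
 * value with bits outside 0..255 as ~(v>>31), i.e. 0 after negative
 * overflow and (once truncated to 8 bits) 255 after positive overflow. */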
#define FINAL_STEP_SCALAR\
        for(x=0; x<b_w; x++)\
            if(add){\
                vbuf[x] += dst[x + src_x];\
                vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
                if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
                dst8[x + y*src_stride] = vbuf[x];\
            }else{\
                dst[x + src_x] -= vbuf[x];\
            }

static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
                                                  const int obmc_stride,
                                                  uint8_t * * block, int b_w,
                                                  int b_h, int src_x, int src_y,
                                                  int src_stride, slice_buffer * sb,
                                                  int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;
    vector unsigned char ob1, ob2, ob3, ob4;

    DECLARE_ALIGNED_16(int, vbuf[16]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
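        /* the four quadrants of the (assumed square) obmc_stride x
         * obmc_stride OBMC window that contribute to this row, as in the
         * scalar ff_snow_inner_add_yblock() */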
        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        LOAD_BLOCKS

        LOAD_OBMCS

        STEPS_0_1

        FINAL_STEP_SCALAR
    }
}

#define STEPS_2_3\
        h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
\
        h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
\
        ih = (vector unsigned char) vec_mergeh(h1,h2);\
\
        l1 = (vector unsigned short) vec_mergel(b3, b2);\
\
        l2 = (vector unsigned short) vec_mergel(b1, b0);\
\
        ih1 = (vector unsigned char) vec_mergel(h1,h2);\
\
        il = (vector unsigned char) vec_mergeh(l1,l2);\
\
        v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
\
        il1 = (vector unsigned char) vec_mergel(l1,l2);\
\
        v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));

static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
                                                   const int obmc_stride,
                                                   uint8_t * * block, int b_w,
                                                   int b_h, int src_x, int src_y,
                                                   int src_stride, slice_buffer * sb,
                                                   int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;
    vector unsigned char ob1, ob2, ob3, ob4;
    DECLARE_ALIGNED_16(int, vbuf[b_w]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        LOAD_BLOCKS

        LOAD_OBMCS

        STEPS_0_1

        STEPS_2_3

        FINAL_STEP_SCALAR
    }
}
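
/* FINAL_STEP_VEC is the all-vector final step used when dst + src_x is
 * 16-byte aligned.  It mirrors FINAL_STEP_SCALAR: round (the 1<<7 / >>8
 * pair hard-codes FRAC_BITS == 8), flag every lane whose value has bits
 * outside 0..255, and overwrite flagged lanes with ~(v>>31).  The sign
 * shift is split into 8+8+15 because vec_splat_u32() shift counts cannot
 * exceed 15. */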
#define FINAL_STEP_VEC \
\
    if(add)\
        {\
            for(x=0; x<b_w/4; x++)\
                {\
                    v[x] = vec_add(v[x], d[x]);\
                    v[x] = vec_sra(vec_add(v[x],\
                                           vec_sl( vec_splat_s32(1),\
                                                   vec_splat_u32(7))),\
                                   vec_splat_u32(8));\
\
                    /* mask: all-ones in lanes whose value fits in 0..255 */\
                    mask = (vector bool int) vec_sl((vector signed int)\
                            vec_cmpeq(v[x],v[x]),vec_splat_u32(8));\
                    mask = (vector bool int) vec_and(v[x],mask);\
                    mask = (vector bool int)\
                            vec_cmpeq((vector signed int)mask,\
                                      (vector signed int)vec_splat_u32(0));\
\
                    /* vs = ~(v >> 31): the clamp value, 0 or all-ones */\
                    vs = vec_sra(v[x], vec_splat_u32(8));\
                    vs = vec_sra(vs,   vec_splat_u32(8));\
                    vs = vec_sra(vs,   vec_splat_u32(15));\
                    vs = vec_nor(vs,vs);\
\
                    /* keep in-range lanes, take vs in overflowed ones */\
                    v[x] = vec_sel(vs, v[x], mask);\
                }\
\
            for(x=0; x<b_w; x++)\
                dst8[x + y*src_stride] = vbuf[x];\
        }\
    else\
        for(x=0; x<b_w/4; x++)\
            d[x] = vec_sub(d[x], v[x]);

static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
                                                    const int obmc_stride,
                                                    uint8_t * * block, int b_w,
                                                    int b_h, int src_x, int src_y,
                                                    int src_stride, slice_buffer * sb,
                                                    int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;
    vector unsigned char ob1, ob2, ob3, ob4;

    DECLARE_ALIGNED_16(int, vbuf[16]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        LOAD_BLOCKS

        LOAD_OBMCS

        STEPS_0_1

        FINAL_STEP_VEC
    }
}

static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
                                                     const int obmc_stride,
                                                     uint8_t * * block, int b_w,
                                                     int b_h, int src_x, int src_y,
                                                     int src_stride, slice_buffer * sb,
                                                     int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;
    vector unsigned char ob1, ob2, ob3, ob4;
    DECLARE_ALIGNED_16(int, vbuf[b_w]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        LOAD_BLOCKS

        LOAD_OBMCS

        STEPS_0_1

        STEPS_2_3

        FINAL_STEP_VEC
    }
}
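
/* Dispatch on alignment and block width: when src_x is a multiple of 16 the
 * fully vectorized final step can be used, otherwise the variants with a
 * scalar final step; block widths other than 8 and 16 fall back to the
 * generic C implementation. */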
void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
                                      uint8_t * * block, int b_w, int b_h,
                                      int src_x, int src_y, int src_stride,
                                      slice_buffer * sb, int add,
                                      uint8_t * dst8)
{
    if (src_x&15) {
        if (b_w == 16)
            inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
                                                   b_w, b_h, src_x, src_y,
                                                   src_stride, sb, add, dst8);
        else if (b_w == 8)
            inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                                  b_w, b_h, src_x, src_y,
                                                  src_stride, sb, add, dst8);
        else
            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                     src_y, src_stride, sb, add, dst8);
    } else {
        if (b_w == 16)
            inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
                                                     b_w, b_h, src_x, src_y,
                                                     src_stride, sb, add, dst8);
        else if (b_w == 8)
            inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                                    b_w, b_h, src_x, src_y,
                                                    src_stride, sb, add, dst8);
        else
            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                     src_y, src_stride, sb, add, dst8);
    }
}

void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
#if 0
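    /* Registration stays disabled: the routines above are out of date with
     * respect to the current snow DSP (see the IDWTELEM note in
     * ff_snow_horizontal_compose97i_altivec) and are kept compiled-out
     * until they are updated and verified. */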
    c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
    c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
    c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
#endif
}