00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023 #include "dsputil.h"
00024
00025 #include "gcc_fixes.h"
00026
00027 #include "dsputil_ppc.h"
00028 #include "util_altivec.h"
00029
/* Sum of absolute differences between a 16 x h block of pix1 and the
 * horizontally half-pel interpolated block of pix2 (each pix2 byte averaged
 * with its right neighbour, rounded).
 * v: unused opaque context pointer (kept for the dsputil function-pointer
 *    signature); line_size: stride of both planes; h: number of rows.
 * Returns the accumulated SAD. */
int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);     /* 16-byte aligned so vec_ste can store into it */
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;        /* four 32-bit partial sums */
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < h; i++) {
        /* Unaligned load idiom: read the two aligned quadwords that straddle
           the address and vec_perm them together using a vec_lvsl shift mask. */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        /* Same row shifted one pixel right, for the x+1/2 interpolation. */
        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Rounded byte-wise average of pix2 and its right-shifted copy. */
        avgv = vec_avg(pix2v, pix2iv);

        /* |pix1 - avg| on unsigned bytes, computed as max - min. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Fold the 16 byte differences into the four 32-bit accumulators. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Horizontal reduction: sum the four partials (result lands in element 3),
       splat it across the vector and store one element out to s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
00076
/* Sum of absolute differences between a 16 x h block of pix1 and the
 * vertically half-pel interpolated block of pix2 (each row averaged with the
 * row below it, rounded). Returns the accumulated SAD. */
int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);     /* aligned scalar target for vec_ste */
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;   /* pointer to the row below pix2 */

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /* Preload the first pix2 row once. Inside the loop only the "next" row
       (pix3) is loaded; it is then carried over into pix2v for the following
       iteration, so every source row is loaded exactly once. */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for (i = 0; i < h; i++) {
        /* Unaligned loads via aligned pair + vec_perm shift. */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Rounded vertical average of the current row and the row below. */
        avgv = vec_avg(pix2v, pix3v);

        /* Byte-wise absolute difference as max - min. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;          /* this row becomes the "upper" row next pass */
        pix3 += line_size;
    }

    /* Reduce the four partial sums and extract the scalar result. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
00136
/* Sum of absolute differences between a 16 x h block of pix1 and the 2-D
 * half-pel interpolated block of pix2:
 *   avg = (p[x,y] + p[x+1,y] + p[x,y+1] + p[x+1,y+1] + 2) >> 2
 * The horizontal pair sums (t1/t2 for the current row, t3/t4 for the row
 * below) are kept in 16-bit lanes so the 4-term sum cannot overflow a byte;
 * t3/t4 are carried over as t1/t2 for the next iteration so each source row
 * is processed once. Returns the accumulated SAD. */
int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    uint8_t *pix3 = pix2 + line_size;   /* row below the current pix2 row */
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;     /* zero-extended halves */
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /* Preload the first pix2 row and its one-pixel-right shifted copy,
       widen both to 16 bits, and form the horizontal pair sums t1 (high
       half) and t2 (low half) once before the loop. */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    /* Zero-extend bytes to shorts: merge with zero puts 0x00 in the high byte. */
    pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for (i = 0; i < h; i++) {
        /* Unaligned loads: pix1 row, pix3 row, and pix3 shifted right by 1. */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /* Widen pix3/pix3i to 16-bit lanes (high and low halves). */
        pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Horizontal pair sums for the lower row. */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        /* 4-term average with rounding: (t1 + t3 + 2) >> 2, per 16-bit lane. */
        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the 16-bit averages back down to 16 bytes. */
        avgv = vec_pack(avghv, avglv);

        /* Byte-wise absolute difference and accumulation. */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;

        /* Lower-row sums become the upper-row sums for the next iteration. */
        t1 = t3;
        t2 = t4;
    }

    /* Horizontal reduction of the four partial sums into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
00237
/* Plain (non-interpolated) sum of absolute differences between two
 * 16 x h blocks. Returns the accumulated SAD. */
int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);     /* aligned scalar target for vec_ste */
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Unaligned loads of one 16-byte row from each plane. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Byte-wise absolute difference: max - min on unsigned bytes. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Fold the 16 differences into four 32-bit partial sums. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Reduce the partial sums and extract the scalar result. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
00279
/* Sum of absolute differences between two 8 x h blocks. Works like
 * sad16_altivec but masks off the upper 8 bytes of each loaded row with
 * permclear so only the first 8 pixels contribute. Returns the SAD. */
int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    /* Keep bytes 0..7, zero bytes 8..15. */
    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for (i = 0; i < h; i++) {
        /* Unaligned loads, then mask each row down to its first 8 bytes so
           the junk past the block width cancels out of the difference. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Byte-wise absolute difference as max - min. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Horizontal reduction into s. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
00324
/* Sum of squares of all pixels in a fixed 16x16 block (the "norm1" used by
 * the encoder). Uses vec_msum to multiply-accumulate each byte with itself
 * into four 32-bit partial sums. Returns the sum of squared pixel values. */
int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {      /* fixed height: 16 rows */
        /* Unaligned load of one 16-byte row. */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Accumulate pix[j]^2 for all 16 bytes of the row. */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }

    /* Reduce the four partials and extract the scalar. */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}
00355
/* Sum of squared errors between two 8 x h blocks. Like sad8_altivec the
 * loaded rows are masked to 8 bytes with permclear; vec_msum then squares
 * and accumulates the byte differences. Returns the SSE. */
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    /* Keep bytes 0..7, zero bytes 8..15. */
    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for (i = 0; i < h; i++) {
        /* Unaligned loads, masked to the 8-pixel block width. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* |a - b| as max - min; squaring below makes the sign irrelevant,
           this just keeps everything in unsigned bytes. */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Accumulate diff^2 for every byte of the row. */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Horizontal reduction into s. */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
00411
/* Sum of squared errors between two 16 x h blocks. Same structure as
 * sse8_altivec but without the width mask (full 16-byte rows). */
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < h; i++) {
        /* Unaligned loads of one 16-byte row from each plane. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* |a - b| as max - min (sign is irrelevant once squared). */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Accumulate diff^2 into four 32-bit partial sums. */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Horizontal reduction into s. */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}
00462
/* Sum of all pixel values in a fixed 16x16 block. Returns the sum. */
int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;    /* four 32-bit partial sums */
    vector signed int sumdiffs;

    int i;
    DECLARE_ALIGNED_16(int, s);     /* aligned scalar target for vec_ste */

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {      /* fixed height: 16 rows */
        /* Unaligned load of one 16-byte row. */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add the 16 bytes of this row into the partial sums. */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Reduce the four partials and extract the scalar. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}
00495
/* Copy an 8x8 block of pixels into a DCT coefficient block, widening each
 * byte to a signed 16-bit value. Only the first 8 bytes of each loaded row
 * are used (the high half produced by vec_mergeh).
 * NOTE(review): vec_st assumes block is 16-byte aligned — confirm callers
 * guarantee DCTELEM block alignment. */
void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for (i = 0; i < 8; i++)
    {
        /* Unaligned load of one source row. */
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        /* Zero-extend the first 8 bytes to 8 shorts (merge puts 0x00 high). */
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        /* Store one row (8 DCTELEMs = 16 bytes) of the output block. */
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}
00521
/* Compute the byte-wise difference s1 - s2 of two 8x8 blocks, widened to
 * signed 16-bit, into a DCT coefficient block. The loop body is manually
 * unrolled x2, so 4 iterations process all 8 rows.
 * NOTE(review): vec_st assumes block is 16-byte aligned — confirm callers
 * guarantee DCTELEM block alignment. */
void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
        const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for (i = 0; i < 4; i++)     /* x2 unrolled: each pass handles two rows */
    {
        /* --- first row of the pair --- */
        /* Unaligned load of one row of s1. */
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        /* Zero-extend the first 8 bytes to shorts. */
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        /* Same for s2. */
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        /* Difference in 16-bit lanes (can be negative). */
        shorts1 = vec_sub(shorts1, shorts2);

        /* Store one output row (8 DCTELEMs). */
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;

        /* --- second row of the pair (identical sequence) --- */
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        shorts1 = vec_sub(shorts1, shorts2);

        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}
00593
00594 void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
00595 register int i;
00596 register vector unsigned char vdst, vsrc;
00597
00598
00599 for(i = 0 ; (i + 15) < w ; i+=16)
00600 {
00601 vdst = vec_ld(i, (unsigned char*)dst);
00602 vsrc = vec_ld(i, (unsigned char*)src);
00603 vdst = vec_add(vsrc, vdst);
00604 vec_st(vdst, i, (unsigned char*)dst);
00605 }
00606
00607 for (; (i < w) ; i++)
00608 {
00609 dst[i] = src[i];
00610 }
00611 }
00612
00613
/* Copy a 16 x h block from pixels (possibly unaligned) to block.
 * The live path is unrolled x4: for each row it loads the two quadwords
 * that straddle the source address (offset 0 and 15) and vec_perms them
 * into place with a single precomputed shift mask; the #if 0 branch keeps
 * the straightforward one-row-per-iteration version for reference.
 * NOTE(review): vec_st assumes block is 16-byte aligned, and the x4 unroll
 * assumes h is a multiple of 4 — confirm callers guarantee both. */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    /* Shift mask depends only on pixels' alignment, which is constant
       because line_size keeps the low 4 address bits unchanged per row
       only when line_size % 16 == 0; the mask is computed once here. */
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);

#if 0
    /* Reference version: one row per iteration. */
    for(i=0; i<h; i++) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
                0, (unsigned char*)block);
        pixels+=line_size;
        block +=line_size;
    }
#else
    /* Unrolled version: 4 rows per iteration. The second load uses offset
       15 (last byte of the row) so the pair of aligned loads always covers
       all 16 source bytes without reading a full extra quadword. */
    for(i=0; i<h; i+=4) {
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(15, (unsigned char*)pixels);
        pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
        pixelsv2B = vec_ld(15 + line_size, (unsigned char*)pixels);
        pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
        pixelsv2C = vec_ld(15 + line_size_2, (unsigned char*)pixels);
        pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
        pixelsv2D = vec_ld(15 + line_size_3, (unsigned char*)pixels);
        vec_st(vec_perm(pixelsv1, pixelsv2, perm),
                0, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
                line_size, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
                line_size_2, (unsigned char*)block);
        vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
                line_size_3, (unsigned char*)block);
        pixels+=line_size_4;
        block +=line_size_4;
    }
#endif
    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
}
00667
00668
00669 #define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
/* Average a 16 x h block of pixels (possibly unaligned) into block:
 * block[i] = rounded_avg(block[i], pixels[i]).
 * NOTE(review): vec_ld/vec_st on block assume it is 16-byte aligned —
 * confirm callers guarantee this. */
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    /* Shift mask for the unaligned source, computed once. */
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for(i=0; i<h; i++) {
        /* Unaligned load of the source row, aligned load of the dest row. */
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        blockv = vec_ld(0, block);
        pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
        /* Rounded byte-wise average, stored back in place. */
        blockv = vec_avg(blockv,pixelsv);
        vec_st(blockv, 0, (unsigned char*)block);
        pixels+=line_size;
        block +=line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
}
00692
00693
/* Average an 8 x h block of pixels into block. Since AltiVec stores are
 * 16 bytes wide, each iteration reads the full 16-byte line containing
 * block, replaces only the 8-byte half that block occupies (selected by
 * its alignment), averages, and writes the full line back. */
void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

    POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);

    for (i = 0; i < h; i++) {
        /* Nonzero when block sits in the upper (right) half of its
           16-byte line; selects which half gets the new pixels. */
        int rightside = ((unsigned long)block & 0x0000000F);

        blockv = vec_ld(0, block);
        /* Unaligned load of the 8 source pixels. */
        pixelsv1 = vec_ld(0, (unsigned char*)pixels);
        pixelsv2 = vec_ld(16, (unsigned char*)pixels);
        pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

        /* Splice the source bytes into the correct half of the dest line
           (vcprm word-select: 0..3 = dest words, s0/s1 = source words),
           so the other half averages with itself and stays unchanged. */
        if (rightside)
        {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
        }
        else
        {
            pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
        }

        /* Rounded byte-wise average. */
        blockv = vec_avg(blockv, pixelsv);

        vec_st(blockv, 0, block);

        pixels += line_size;
        block += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
}
00733
00734
/* 2-D half-pel interpolation of an 8 x h block (rounding variant):
 *   block = (p[x,y] + p[x+1,y] + p[x,y+1] + p[x+1,y+1] + 2) >> 2
 * The horizontal pair sum of each row is kept in 16-bit lanes (pixelssum1
 * holds the previous row's sum, already biased by +2 for rounding) and
 * carried between iterations so every source row is loaded once. Writes go
 * through a read-modify-write of the 16-byte line containing block, like
 * avg_pixels8_altivec. */
void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char
        pixelsv1, pixelsv2,
        pixelsavg;
    register vector unsigned char
        blockv, temp1, temp2;
    register vector unsigned short
        pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Preload row 0 and its one-pixel-right shifted copy. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    /* When pixels ends exactly on a quadword boundary, lvsl(1,...) would
       wrap, but the shifted row is then simply the second quadword. */
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* Widen to 16-bit and form the horizontal pair sum, pre-biased by +2. */
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
            (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        /* Which half of the 16-byte destination line block occupies. */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        /* Load the next row and its right-shifted copy. */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        /* Pair sum of the new row; combine with the carried (biased) sum
           of the previous row and shift for the 4-term rounded average. */
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        /* Carry the new row's sum forward, re-biased by +2. */
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        /* Splice the 8 result bytes into the correct half of the line. */
        if (rightside)
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        }
        else
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
}
00809
00810
/* 2-D half-pel interpolation of an 8 x h block, no-rounding variant:
 * identical to put_pixels8_xy2_altivec except the carried pair sum is
 * biased by +1 (vcone) instead of +2, i.e.
 *   block = (p[x,y] + p[x+1,y] + p[x,y+1] + p[x+1,y+1] + 1) >> 2 */
void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char
        pixelsv1, pixelsv2,
        pixelsavg;
    register vector unsigned char
        blockv, temp1, temp2;
    register vector unsigned short
        pixelssum1, pixelssum2, temp3;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    /* Preload row 0 and its one-pixel-right shifted copy. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    /* Boundary case: shifted row is exactly the second quadword. */
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* Widen and form the pair sum, pre-biased by +1 (no-round). */
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
            (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        /* Which half of the 16-byte destination line block occupies. */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        /* Load the next row and its right-shifted copy. */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        /* 4-term sum with +1 bias, shifted down by 2. */
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        /* Carry the new row's sum forward, re-biased by +1. */
        pixelssum1 = vec_add(pixelssum2, vcone);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        /* Splice the 8 result bytes into the correct half of the line. */
        if (rightside)
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        }
        else
        {
            blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
}
00886
00887
/* 2-D half-pel interpolation of a 16 x h block (rounding variant). Same
 * algorithm as put_pixels8_xy2_altivec but processes the full 16 pixels:
 * the row is widened into two 8-lane 16-bit halves (high: pixelssum1/2,
 * low: pixelssum3/4) and both halves carry their +2-biased pair sums
 * between iterations. The output line is written whole, so no half-line
 * splicing is needed.
 * NOTE(review): vec_st assumes block is 16-byte aligned — confirm callers
 * guarantee this. */
void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
    register int i;
    register vector unsigned char
        pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char
        blockv, temp1, temp2;
    register vector unsigned short
        pixelssum1, pixelssum2, temp3,
        pixelssum3, pixelssum4, temp4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);

    /* Preload row 0 and its one-pixel-right shifted copy. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    /* Boundary case: shifted row is exactly the second quadword. */
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* Widen: v3/v4 hold the low 8 pixels, v1/v2 the high 8. */
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    /* Pair sums for both halves, pre-biased by +2 for rounding. */
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
            (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vctwo);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
            (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        /* Load the next row and its right-shifted copy. */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        /* New-row pair sums; 4-term rounded average in both halves. */
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        /* Carry both halves forward, re-biased by +2. */
        pixelssum3 = vec_add(pixelssum4, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);

        /* Pack both 8-lane halves back into 16 output bytes. */
        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
}
00968
00969
/* 2-D half-pel interpolation of a 16 x h block, no-rounding variant:
 * identical to put_pixels16_xy2_altivec except the carried pair sums are
 * biased by +1 (vcone) instead of +2, i.e. (sum + 1) >> 2 per pixel.
 * NOTE(review): vec_st assumes block is 16-byte aligned — confirm callers
 * guarantee this. */
void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
    register int i;
    register vector unsigned char
        pixelsv1, pixelsv2, pixelsv3, pixelsv4;
    register vector unsigned char
        blockv, temp1, temp2;
    register vector unsigned short
        pixelssum1, pixelssum2, temp3,
        pixelssum3, pixelssum4, temp4;
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
    register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

    POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);

    /* Preload row 0 and its one-pixel-right shifted copy. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    /* Boundary case: shifted row is exactly the second quadword. */
    if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
    {
        pixelsv2 = temp2;
    }
    else
    {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    /* Widen: v3/v4 = low 8 pixels, v1/v2 = high 8 pixels. */
    pixelsv3 = vec_mergel(vczero, pixelsv1);
    pixelsv4 = vec_mergel(vczero, pixelsv2);
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    /* Pair sums for both halves, pre-biased by +1 (no-round). */
    pixelssum3 = vec_add((vector unsigned short)pixelsv3,
            (vector unsigned short)pixelsv4);
    pixelssum3 = vec_add(pixelssum3, vcone);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
            (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vcone);

    for (i = 0; i < h ; i++) {
        blockv = vec_ld(0, block);

        /* Load the next row and its right-shifted copy. */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
        {
            pixelsv2 = temp2;
        }
        else
        {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv3 = vec_mergel(vczero, pixelsv1);
        pixelsv4 = vec_mergel(vczero, pixelsv2);
        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);

        /* New-row pair sums; 4-term average with +1 bias, >> 2. */
        pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                (vector unsigned short)pixelsv4);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                (vector unsigned short)pixelsv2);
        temp4 = vec_add(pixelssum3, pixelssum4);
        temp4 = vec_sra(temp4, vctwo);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);

        /* Carry both halves forward, re-biased by +1. */
        pixelssum3 = vec_add(pixelssum4, vcone);
        pixelssum1 = vec_add(pixelssum2, vcone);

        /* Pack both halves back into the 16 output bytes. */
        blockv = vec_packsu(temp3, temp4);

        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
}
01051
/**
 * Sum of absolute transformed differences (SATD) of one 8x8 block.
 * Computes the 2-D 8x8 Hadamard transform of (src - dst) and returns the
 * sum of the absolute values of all 64 transform coefficients.
 *
 * The horizontal (per-row) transform is done inside ONEITERBUTTERFLY as
 * three butterfly stages: each stage pairs elements via vec_perm and
 * combines them with vec_mladd against a +/-1 sign vector.  The vertical
 * transform is the add/sub ladder over temp0..temp7 in the second scope.
 *
 * NOTE(review): the 's' context pointer and 'h' are unused here; callers
 * appear to always request an 8-line block — confirm against DSPContext
 * usage before changing the signature.
 */
int hadamard8_diff8x8_altivec( void *s, uint8_t *dst, uint8_t *src, int stride, int h){
    POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
    int sum;
    register const vector unsigned char vzero =
        (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
        temp5, temp6, temp7;
    POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
    {
        /* Sign patterns for the three horizontal butterfly stages
           (pair distance 1, 2 and 4 respectively). */
        register const vector signed short vprod1 =(const vector signed short)
            AVV( 1,-1, 1,-1, 1,-1, 1,-1);
        register const vector signed short vprod2 =(const vector signed short)
            AVV( 1, 1,-1,-1, 1, 1,-1,-1);
        register const vector signed short vprod3 =(const vector signed short)
            AVV( 1, 1, 1, 1,-1,-1,-1,-1);
        /* Byte-level swap patterns matching vprod1..3: they exchange the
           two 16-bit partners of each butterfly pair. */
        register const vector unsigned char perm1 = (const vector unsigned char)
            AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
                0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
        register const vector unsigned char perm2 = (const vector unsigned char)
            AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
                0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
        register const vector unsigned char perm3 = (const vector unsigned char)
            AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);

/* One row: load 8 src and 8 dst pixels (unaligned-safe), widen to s16,
   subtract, then apply the three-stage horizontal Hadamard butterfly.
   NOTE(review): macro arguments are used unparenthesized; this is safe
   only because every invocation below passes literal constants. */
#define ONEITERBUTTERFLY(i, res) \
    { \
        register vector unsigned char src1, src2, srcO; \
        register vector unsigned char dst1, dst2, dstO; \
        register vector signed short srcV, dstV; \
        register vector signed short but0, but1, but2, op1, op2, op3; \
        /* unaligned load: the two aligned blocks covering [p, p+15] */ \
        src1 = vec_ld(stride * i, src); \
        src2 = vec_ld((stride * i) + 15, src); \
        srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
        dst1 = vec_ld(stride * i, dst); \
        dst2 = vec_ld((stride * i) + 15, dst); \
        dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
        \
        /* widen u8 -> s16: mergeh(0, x) builds big-endian shorts whose
           value equals the unsigned source byte */ \
        srcV = \
            (vector signed short)vec_mergeh((vector signed char)vzero, \
                (vector signed char)srcO); \
        dstV = \
            (vector signed short)vec_mergeh((vector signed char)vzero, \
                (vector signed char)dstO); \
        \
        /* horizontal Hadamard: x' = x*sign + partner at each stage */ \
        but0 = vec_sub(srcV, dstV); \
        op1 = vec_perm(but0, but0, perm1); \
        but1 = vec_mladd(but0, vprod1, op1); \
        op2 = vec_perm(but1, but1, perm2); \
        but2 = vec_mladd(but1, vprod2, op2); \
        op3 = vec_perm(but2, but2, perm3); \
        res = vec_mladd(but2, vprod3, op3); \
    }
        ONEITERBUTTERFLY(0, temp0);
        ONEITERBUTTERFLY(1, temp1);
        ONEITERBUTTERFLY(2, temp2);
        ONEITERBUTTERFLY(3, temp3);
        ONEITERBUTTERFLY(4, temp4);
        ONEITERBUTTERFLY(5, temp5);
        ONEITERBUTTERFLY(6, temp6);
        ONEITERBUTTERFLY(7, temp7);
    }
#undef ONEITERBUTTERFLY
    {
        register vector signed int vsum;
        /* Vertical Hadamard across the 8 rows: stage 1 (distance 1) */
        register vector signed short line0 = vec_add(temp0, temp1);
        register vector signed short line1 = vec_sub(temp0, temp1);
        register vector signed short line2 = vec_add(temp2, temp3);
        register vector signed short line3 = vec_sub(temp2, temp3);
        register vector signed short line4 = vec_add(temp4, temp5);
        register vector signed short line5 = vec_sub(temp4, temp5);
        register vector signed short line6 = vec_add(temp6, temp7);
        register vector signed short line7 = vec_sub(temp6, temp7);

        /* stage 2 (distance 2) */
        register vector signed short line0B = vec_add(line0, line2);
        register vector signed short line2B = vec_sub(line0, line2);
        register vector signed short line1B = vec_add(line1, line3);
        register vector signed short line3B = vec_sub(line1, line3);
        register vector signed short line4B = vec_add(line4, line6);
        register vector signed short line6B = vec_sub(line4, line6);
        register vector signed short line5B = vec_add(line5, line7);
        register vector signed short line7B = vec_sub(line5, line7);

        /* stage 3 (distance 4) */
        register vector signed short line0C = vec_add(line0B, line4B);
        register vector signed short line4C = vec_sub(line0B, line4B);
        register vector signed short line1C = vec_add(line1B, line5B);
        register vector signed short line5C = vec_sub(line1B, line5B);
        register vector signed short line2C = vec_add(line2B, line6B);
        register vector signed short line6C = vec_sub(line2B, line6B);
        register vector signed short line3C = vec_add(line3B, line7B);
        register vector signed short line7C = vec_sub(line3B, line7B);

        /* sum |coefficient| over all 64 values, then reduce to a scalar */
        vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
        vsum = vec_sum4s(vec_abs(line1C), vsum);
        vsum = vec_sum4s(vec_abs(line2C), vsum);
        vsum = vec_sum4s(vec_abs(line3C), vsum);
        vsum = vec_sum4s(vec_abs(line4C), vsum);
        vsum = vec_sum4s(vec_abs(line5C), vsum);
        vsum = vec_sum4s(vec_abs(line6C), vsum);
        vsum = vec_sum4s(vec_abs(line7C), vsum);
        vsum = vec_sums(vsum, (vector signed int)vzero);
        vsum = vec_splat(vsum, 3);
        vec_ste(vsum, 0, &sum);
    }
    POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
    return sum;
}
01160
01161
01162
01163
01164
01165
01166
01167
01168
01169
01170
01171
01172
01173
01174
01175
01176
01177
01178
01179
01180
01181
01182
01183 static int hadamard8_diff16x8_altivec( void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
01184 int sum;
01185 register vector signed short
01186 temp0 REG_v(v0),
01187 temp1 REG_v(v1),
01188 temp2 REG_v(v2),
01189 temp3 REG_v(v3),
01190 temp4 REG_v(v4),
01191 temp5 REG_v(v5),
01192 temp6 REG_v(v6),
01193 temp7 REG_v(v7);
01194 register vector signed short
01195 temp0S REG_v(v8),
01196 temp1S REG_v(v9),
01197 temp2S REG_v(v10),
01198 temp3S REG_v(v11),
01199 temp4S REG_v(v12),
01200 temp5S REG_v(v13),
01201 temp6S REG_v(v14),
01202 temp7S REG_v(v15);
01203 register const vector unsigned char vzero REG_v(v31)=
01204 (const vector unsigned char)vec_splat_u8(0);
01205 {
01206 register const vector signed short vprod1 REG_v(v16)=
01207 (const vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
01208 register const vector signed short vprod2 REG_v(v17)=
01209 (const vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
01210 register const vector signed short vprod3 REG_v(v18)=
01211 (const vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
01212 register const vector unsigned char perm1 REG_v(v19)=
01213 (const vector unsigned char)
01214 AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
01215 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
01216 register const vector unsigned char perm2 REG_v(v20)=
01217 (const vector unsigned char)
01218 AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
01219 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
01220 register const vector unsigned char perm3 REG_v(v21)=
01221 (const vector unsigned char)
01222 AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
01223 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
01224
01225 #define ONEITERBUTTERFLY(i, res1, res2) \
01226 { \
01227 register vector unsigned char src1 REG_v(v22), \
01228 src2 REG_v(v23), \
01229 dst1 REG_v(v24), \
01230 dst2 REG_v(v25), \
01231 srcO REG_v(v22), \
01232 dstO REG_v(v23); \
01233 \
01234 register vector signed short srcV REG_v(v24), \
01235 dstV REG_v(v25), \
01236 srcW REG_v(v26), \
01237 dstW REG_v(v27), \
01238 but0 REG_v(v28), \
01239 but0S REG_v(v29), \
01240 op1 REG_v(v30), \
01241 but1 REG_v(v22), \
01242 op1S REG_v(v23), \
01243 but1S REG_v(v24), \
01244 op2 REG_v(v25), \
01245 but2 REG_v(v26), \
01246 op2S REG_v(v27), \
01247 but2S REG_v(v28), \
01248 op3 REG_v(v29), \
01249 op3S REG_v(v30); \
01250 \
01251 src1 = vec_ld(stride * i, src); \
01252 src2 = vec_ld((stride * i) + 16, src); \
01253 srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
01254 dst1 = vec_ld(stride * i, dst); \
01255 dst2 = vec_ld((stride * i) + 16, dst); \
01256 dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
01257 \
01258 srcV = \
01259 (vector signed short)vec_mergeh((vector signed char)vzero, \
01260 (vector signed char)srcO); \
01261 dstV = \
01262 (vector signed short)vec_mergeh((vector signed char)vzero, \
01263 (vector signed char)dstO); \
01264 srcW = \
01265 (vector signed short)vec_mergel((vector signed char)vzero, \
01266 (vector signed char)srcO); \
01267 dstW = \
01268 (vector signed short)vec_mergel((vector signed char)vzero, \
01269 (vector signed char)dstO); \
01270 \
01271 but0 = vec_sub(srcV, dstV); \
01272 but0S = vec_sub(srcW, dstW); \
01273 op1 = vec_perm(but0, but0, perm1); \
01274 but1 = vec_mladd(but0, vprod1, op1); \
01275 op1S = vec_perm(but0S, but0S, perm1); \
01276 but1S = vec_mladd(but0S, vprod1, op1S); \
01277 op2 = vec_perm(but1, but1, perm2); \
01278 but2 = vec_mladd(but1, vprod2, op2); \
01279 op2S = vec_perm(but1S, but1S, perm2); \
01280 but2S = vec_mladd(but1S, vprod2, op2S); \
01281 op3 = vec_perm(but2, but2, perm3); \
01282 res1 = vec_mladd(but2, vprod3, op3); \
01283 op3S = vec_perm(but2S, but2S, perm3); \
01284 res2 = vec_mladd(but2S, vprod3, op3S); \
01285 }
01286 ONEITERBUTTERFLY(0, temp0, temp0S);
01287 ONEITERBUTTERFLY(1, temp1, temp1S);
01288 ONEITERBUTTERFLY(2, temp2, temp2S);
01289 ONEITERBUTTERFLY(3, temp3, temp3S);
01290 ONEITERBUTTERFLY(4, temp4, temp4S);
01291 ONEITERBUTTERFLY(5, temp5, temp5S);
01292 ONEITERBUTTERFLY(6, temp6, temp6S);
01293 ONEITERBUTTERFLY(7, temp7, temp7S);
01294 }
01295 #undef ONEITERBUTTERFLY
01296 {
01297 register vector signed int vsum;
01298 register vector signed short line0S, line1S, line2S, line3S, line4S,
01299 line5S, line6S, line7S, line0BS,line2BS,
01300 line1BS,line3BS,line4BS,line6BS,line5BS,
01301 line7BS,line0CS,line4CS,line1CS,line5CS,
01302 line2CS,line6CS,line3CS,line7CS;
01303
01304 register vector signed short line0 = vec_add(temp0, temp1);
01305 register vector signed short line1 = vec_sub(temp0, temp1);
01306 register vector signed short line2 = vec_add(temp2, temp3);
01307 register vector signed short line3 = vec_sub(temp2, temp3);
01308 register vector signed short line4 = vec_add(temp4, temp5);
01309 register vector signed short line5 = vec_sub(temp4, temp5);
01310 register vector signed short line6 = vec_add(temp6, temp7);
01311 register vector signed short line7 = vec_sub(temp6, temp7);
01312
01313 register vector signed short line0B = vec_add(line0, line2);
01314 register vector signed short line2B = vec_sub(line0, line2);
01315 register vector signed short line1B = vec_add(line1, line3);
01316 register vector signed short line3B = vec_sub(line1, line3);
01317 register vector signed short line4B = vec_add(line4, line6);
01318 register vector signed short line6B = vec_sub(line4, line6);
01319 register vector signed short line5B = vec_add(line5, line7);
01320 register vector signed short line7B = vec_sub(line5, line7);
01321
01322 register vector signed short line0C = vec_add(line0B, line4B);
01323 register vector signed short line4C = vec_sub(line0B, line4B);
01324 register vector signed short line1C = vec_add(line1B, line5B);
01325 register vector signed short line5C = vec_sub(line1B, line5B);
01326 register vector signed short line2C = vec_add(line2B, line6B);
01327 register vector signed short line6C = vec_sub(line2B, line6B);
01328 register vector signed short line3C = vec_add(line3B, line7B);
01329 register vector signed short line7C = vec_sub(line3B, line7B);
01330
01331 vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
01332 vsum = vec_sum4s(vec_abs(line1C), vsum);
01333 vsum = vec_sum4s(vec_abs(line2C), vsum);
01334 vsum = vec_sum4s(vec_abs(line3C), vsum);
01335 vsum = vec_sum4s(vec_abs(line4C), vsum);
01336 vsum = vec_sum4s(vec_abs(line5C), vsum);
01337 vsum = vec_sum4s(vec_abs(line6C), vsum);
01338 vsum = vec_sum4s(vec_abs(line7C), vsum);
01339
01340 line0S = vec_add(temp0S, temp1S);
01341 line1S = vec_sub(temp0S, temp1S);
01342 line2S = vec_add(temp2S, temp3S);
01343 line3S = vec_sub(temp2S, temp3S);
01344 line4S = vec_add(temp4S, temp5S);
01345 line5S = vec_sub(temp4S, temp5S);
01346 line6S = vec_add(temp6S, temp7S);
01347 line7S = vec_sub(temp6S, temp7S);
01348
01349 line0BS = vec_add(line0S, line2S);
01350 line2BS = vec_sub(line0S, line2S);
01351 line1BS = vec_add(line1S, line3S);
01352 line3BS = vec_sub(line1S, line3S);
01353 line4BS = vec_add(line4S, line6S);
01354 line6BS = vec_sub(line4S, line6S);
01355 line5BS = vec_add(line5S, line7S);
01356 line7BS = vec_sub(line5S, line7S);
01357
01358 line0CS = vec_add(line0BS, line4BS);
01359 line4CS = vec_sub(line0BS, line4BS);
01360 line1CS = vec_add(line1BS, line5BS);
01361 line5CS = vec_sub(line1BS, line5BS);
01362 line2CS = vec_add(line2BS, line6BS);
01363 line6CS = vec_sub(line2BS, line6BS);
01364 line3CS = vec_add(line3BS, line7BS);
01365 line7CS = vec_sub(line3BS, line7BS);
01366
01367 vsum = vec_sum4s(vec_abs(line0CS), vsum);
01368 vsum = vec_sum4s(vec_abs(line1CS), vsum);
01369 vsum = vec_sum4s(vec_abs(line2CS), vsum);
01370 vsum = vec_sum4s(vec_abs(line3CS), vsum);
01371 vsum = vec_sum4s(vec_abs(line4CS), vsum);
01372 vsum = vec_sum4s(vec_abs(line5CS), vsum);
01373 vsum = vec_sum4s(vec_abs(line6CS), vsum);
01374 vsum = vec_sum4s(vec_abs(line7CS), vsum);
01375 vsum = vec_sums(vsum, (vector signed int)vzero);
01376 vsum = vec_splat(vsum, 3);
01377 vec_ste(vsum, 0, &sum);
01378 }
01379 return sum;
01380 }
01381
01382 int hadamard8_diff16_altivec( void *s, uint8_t *dst, uint8_t *src, int stride, int h){
01383 POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
01384 int score;
01385 POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
01386 score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
01387 if (h==16) {
01388 dst += 8*stride;
01389 src += 8*stride;
01390 score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
01391 }
01392 POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
01393 return score;
01394 }
01395
01396 static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
01397 int blocksize)
01398 {
01399 int i;
01400 vector float m, a;
01401 vector bool int t0, t1;
01402 const vector unsigned int v_31 =
01403 vec_add(vec_add(vec_splat_u32(15),vec_splat_u32(15)),vec_splat_u32(1));
01404 for(i=0; i<blocksize; i+=4) {
01405 m = vec_ld(0, mag+i);
01406 a = vec_ld(0, ang+i);
01407 t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
01408 t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
01409 a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
01410 t0 = (vector bool int)vec_and(a, t1);
01411 t1 = (vector bool int)vec_andc(a, t1);
01412 a = vec_sub(m, (vector float)t1);
01413 m = vec_add(m, (vector float)t0);
01414 vec_stl(a, 0, ang+i);
01415 vec_stl(m, 0, mag+i);
01416 }
01417 }
01418
01419
/**
 * Half-pel (x+1/2, y+1/2) interpolation averaged into the destination:
 * for each output pixel, computes the rounded mean of the 2x2 source
 * neighborhood ((A+B+C+D+2)>>2) and then vec_avg()s that with the pixel
 * already stored in 'block' (the "avg" motion-compensation variant).
 * Only the left 8 bytes of each 16-byte destination vector are touched;
 * which half depends on block's alignment (see 'rightside').
 */
void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
        vec_splat_u8(0);
    /* rounding constant for the (sum + 2) >> 2 average */
    register const vector unsigned short vctwo = (const vector unsigned short)
        vec_splat_u16(2);

    /* Prime pixelssum1 with row 0: horizontal pairs (p[x] + p[x+1]),
       widened to u16, plus the rounding 2. */
    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    /* When the low nibble is 0xF, pixels+1 is 16-byte aligned and the
       shifted vector is exactly temp2; presumably this special case also
       avoids a vec_lvsl/vec_perm spanning the block edge — TODO confirm. */
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

    POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        /* Is 'block' in the high (right) half of its aligned 16 bytes? */
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        /* Horizontal pair sums for the next source row. */
        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
        {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        /* temp3 = (A+B) + (C+D) + 2 (the +2 rides along in pixelssum1) */
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        /* carry this row's pair sums (plus the rounding 2) to the next row */
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        /* Merge the 8 interpolated bytes into the proper half of blockv. */
        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        /* 'avg' variant: rounded mean with the existing destination. */
        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

    POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
}
01486
01487 void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
01488 {
01489 c->pix_abs[0][1] = sad16_x2_altivec;
01490 c->pix_abs[0][2] = sad16_y2_altivec;
01491 c->pix_abs[0][3] = sad16_xy2_altivec;
01492 c->pix_abs[0][0] = sad16_altivec;
01493 c->pix_abs[1][0] = sad8_altivec;
01494 c->sad[0]= sad16_altivec;
01495 c->sad[1]= sad8_altivec;
01496 c->pix_norm1 = pix_norm1_altivec;
01497 c->sse[1]= sse8_altivec;
01498 c->sse[0]= sse16_altivec;
01499 c->pix_sum = pix_sum_altivec;
01500 c->diff_pixels = diff_pixels_altivec;
01501 c->get_pixels = get_pixels_altivec;
01502 c->add_bytes= add_bytes_altivec;
01503 c->put_pixels_tab[0][0] = put_pixels16_altivec;
01504
01505 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
01506 c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
01507 c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
01508 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
01509 c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
01510 c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
01511 c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
01512 c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
01513
01514 c->hadamard8_diff[0] = hadamard8_diff16_altivec;
01515 c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
01516 if (ENABLE_VORBIS_DECODER)
01517 c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
01518 }