/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"

#ifdef __APPLE__
#include <sys/sysctl.h>
#elif __AMIGAOS4__
#include <exec/exec.h>
#include <interfaces/exec.h>
#include <proto/exec.h>
#else
#include <signal.h>
#include <setjmp.h>

static sigjmp_buf jmpbuf;
static volatile sig_atomic_t canjump = 0;

static void sigill_handler (int sig)
{
    if (!canjump) {
        signal (sig, SIG_DFL);
        raise (sig);
    }

    canjump = 0;
    siglongjmp (jmpbuf, 1);
}
#endif /* __APPLE__ */
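
/* A minimal sketch of how the SIGILL machinery above is used for runtime
   AltiVec detection on the signal-based branch (the wrapper name here is
   illustrative only; the real check is has_altivec(), so this stays
   disabled): */
#if 0
static int altivec_probe(void)
{
    signal(SIGILL, sigill_handler);
    if (sigsetjmp(jmpbuf, 1)) {
        /* we longjmp'ed back from the handler: no AltiVec available */
        signal(SIGILL, SIG_DFL);
        return 0;
    } else {
        canjump = 1;
        /* execute an AltiVec instruction; it raises SIGILL without AltiVec */
        asm volatile ("mtspr 256, %0\n\t"
                      "vand %%v0, %%v0, %%v0"
                      :
                      : "r" (-1));
        signal(SIGILL, SIG_DFL);
        return 1;
    }
}
#endif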

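/* Most of the routines below read unaligned data with the standard AltiVec
   idiom: vec_ld (here via dereferencing a vector pointer) ignores the low
   four address bits, so tv[0] and tv[1] fetch the two aligned 16-byte
   blocks straddling the pointer, and vec_perm, with the mask vec_lvsl
   builds from those same low address bits, shifts the 16 wanted bytes
   into place. */
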
int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read each
       time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    uint8_t *pix3 = pix2 + line_size;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    s = 0;

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting and vector addition, each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);

    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /*
          Note that AltiVec does have vec_avg, but this works on vector pairs
          and rounds up. We could do avg(avg(a,b),avg(c,d)), but the rounding
          would mean that, for example, avg(3,0,0,1) = 2, when it should be 1:
          each vec_avg rounds up, so avg(avg(3,0),avg(0,1)) = avg(2,1) = 2,
          whereas the correct result is (3+0+0+1+2)>>2 = 1.
          Instead, we have to split the pixel vectors into vectors of shorts,
          and do the averaging by hand.
        */

        /* Split the pixel vectors into shorts */
        pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

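/* vec_msum(pixv, pixv, sv) multiplies each unsigned byte by itself and
   accumulates every group of four adjacent products into the matching
   32-bit lane of sv, so a single instruction per row does all of the
   squaring and most of the summing. */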
int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;

    sv = (vector unsigned int)vec_splat_u32(0);

    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /*
          Since we want to use unsigned chars, we can take advantage
          of the fact that abs(a-b)^2 = (a-b)^2.
        */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    DECLARE_ALIGNED_16(int, s);
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3, t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;

    sum = (vector unsigned int)vec_splat_u32(0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /*
          Since we want to use unsigned chars, we can take advantage
          of the fact that abs(a-b)^2 = (a-b)^2.
        */

        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);

    return s;
}

int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    DECLARE_ALIGNED_16(int, s);

    sad = (vector unsigned int)vec_splat_u32(0);

    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);

        pix += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for(i=0;i<8;i++)
    {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}

void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
        const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for(i=0;i<4;i++)
    {
        // Read potentially unaligned pixels
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;


        // The code below is a copy of the code above... This is a manual
        // unroll.

        // Read potentially unaligned pixels
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16-byte aligned (guaranteed) */
    for(i = 0 ; (i + 15) < w ; i+=16)
    {
      vdst = vec_ld(i, (unsigned char*)dst);
      vsrc = vec_ld(i, (unsigned char*)src);
      vdst = vec_add(vsrc, vdst);
      vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16 */
    for (; (i < w) ; i++)
    {
      dst[i] = src[i];
    }
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
// hand-unrolling the loop by 4 gains about 15%
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
#if 0
    for(i=0; i<h; i++) {
      pixelsv1 = vec_ld(0, (unsigned char*)pixels);
      pixelsv2 = vec_ld(16, (unsigned char*)pixels);
      vec_st(vec_perm(pixelsv1, pixelsv2, perm),
             0, (unsigned char*)block);
      pixels+=line_size;
      block +=line_size;
    }
#else
    for(i=0; i<h; i+=4) {
      pixelsv1 = vec_ld(0, (unsigned char*)pixels);
      pixelsv2 = vec_ld(15, (unsigned char*)pixels);
      pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
      pixelsv2B = vec_ld(15 + line_size, (unsigned char*)pixels);
      pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
      pixelsv2C = vec_ld(15 + line_size_2, (unsigned char*)pixels);
      pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
      pixelsv2D = vec_ld(15 + line_size_3, (unsigned char*)pixels);
      vec_st(vec_perm(pixelsv1, pixelsv2, perm),
             0, (unsigned char*)block);
      vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
             line_size, (unsigned char*)block);
      vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
             line_size_2, (unsigned char*)block);
      vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
             line_size_3, (unsigned char*)block);
      pixels+=line_size_4;
      block +=line_size_4;
    }
#endif
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
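/* op_avg above is the scalar rounded-average trick: since a|b = (a&b) + (a^b),
   (a|b) - (((a^b) & 0xFEFEFEFE) >> 1) = (a&b) + ceil((a^b)/2) = (a+b+1) >> 1
   independently for each byte (the 0xFEFEFEFE mask keeps the shift from
   bleeding across byte lanes). The AltiVec version below gets the same
   rounding from vec_avg. */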
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for(i=0; i<h; i++) {
      pixelsv1 = vec_ld(0, (unsigned char*)pixels);
      pixelsv2 = vec_ld(16, (unsigned char*)pixels);
      blockv = vec_ld(0, block);
      pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
      blockv = vec_avg(blockv,pixelsv);
      vec_st(blockv, 0, (unsigned char*)block);
      pixels+=line_size;
      block +=line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);

   for (i = 0; i < h; i++) {
     /*
       block is 8-byte aligned, so we're either in the
       left half of a 16-byte aligned block, or in the right half (not aligned)
     */
     int rightside = ((unsigned long)block & 0x0000000F);

     blockv = vec_ld(0, block);
     pixelsv1 = vec_ld(0, (unsigned char*)pixels);
     pixelsv2 = vec_ld(16, (unsigned char*)pixels);
     pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));

     if (rightside)
     {
       pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
     }
     else
     {
       pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
     }

     blockv = vec_avg(blockv, pixelsv);

     vec_st(blockv, 0, block);

     pixels += line_size;
     block += line_size;
   }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
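/* half-pel interpolation in both x and y: each output byte is
   (A + B + C + D + 2) >> 2, where A,B are horizontal neighbours in one row
   and C,D in the next. The horizontal sums A+B (with the +2 rounding bias
   folded in) are carried in pixelssum1 from one iteration to the next, so
   every source row is loaded and summed only once. */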
void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2,
     pixelsavg;
   register vector unsigned char
     blockv, temp1, temp2;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3;
   register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
   register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vctwo);

POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
   for (i = 0; i < h ; i++) {
     int rightside = ((unsigned long)block & 0x0000000F);
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);
     pixelssum1 = vec_add(pixelssum2, vctwo);
     pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

     if (rightside)
     {
       blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
     }
     else
     {
       blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
     }

     vec_st(blockv, 0, block);

     block += line_size;
     pixels += line_size;
   }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
}

/* next one assumes that ((line_size % 8) == 0) */
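/* identical to put_pixels8_xy2_altivec above except for the rounding bias:
   vcone folds +1 instead of +2 into the carried row sum, yielding the
   no-rounding variant (A + B + C + D + 1) >> 2 */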
void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2,
     pixelsavg;
   register vector unsigned char
     blockv, temp1, temp2;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3;
   register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
   register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
   register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vcone);

POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
   for (i = 0; i < h ; i++) {
     int rightside = ((unsigned long)block & 0x0000000F);
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);
     pixelssum1 = vec_add(pixelssum2, vcone);
     pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

     if (rightside)
     {
       blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
     }
     else
     {
       blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
     }

     vec_st(blockv, 0, block);

     block += line_size;
     pixels += line_size;
   }

POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
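/* 16-pixel-wide version of put_pixels8_xy2_altivec: the same running-sum
   scheme, with both the high half (pixelssum1) and the low half (pixelssum3)
   of each widened row carried across iterations */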
void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2, pixelsv3, pixelsv4;
   register vector unsigned char
     blockv, temp1, temp2;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3,
     pixelssum3, pixelssum4, temp4;
   register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
   register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);

   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv3 = vec_mergel(vczero, pixelsv1);
   pixelsv4 = vec_mergel(vczero, pixelsv2);
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                        (vector unsigned short)pixelsv4);
   pixelssum3 = vec_add(pixelssum3, vctwo);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vctwo);

   for (i = 0; i < h ; i++) {
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv3 = vec_mergel(vczero, pixelsv1);
     pixelsv4 = vec_mergel(vczero, pixelsv2);
     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);

     pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                          (vector unsigned short)pixelsv4);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp4 = vec_add(pixelssum3, pixelssum4);
     temp4 = vec_sra(temp4, vctwo);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);

     pixelssum3 = vec_add(pixelssum4, vctwo);
     pixelssum1 = vec_add(pixelssum2, vctwo);

     blockv = vec_packsu(temp3, temp4);

     vec_st(blockv, 0, block);

     block += line_size;
     pixels += line_size;
   }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
}

/* next one assumes that ((line_size % 16) == 0) */
void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2, pixelsv3, pixelsv4;
   register vector unsigned char
     blockv, temp1, temp2;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3,
     pixelssum3, pixelssum4, temp4;
   register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
   register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
   register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);

POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);

   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv3 = vec_mergel(vczero, pixelsv1);
   pixelsv4 = vec_mergel(vczero, pixelsv2);
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                        (vector unsigned short)pixelsv4);
   pixelssum3 = vec_add(pixelssum3, vcone);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vcone);

   for (i = 0; i < h ; i++) {
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv3 = vec_mergel(vczero, pixelsv1);
     pixelsv4 = vec_mergel(vczero, pixelsv2);
     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);

     pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                          (vector unsigned short)pixelsv4);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp4 = vec_add(pixelssum3, pixelssum4);
     temp4 = vec_sra(temp4, vctwo);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);

     pixelssum3 = vec_add(pixelssum4, vcone);
     pixelssum1 = vec_add(pixelssum2, vcone);

     blockv = vec_packsu(temp3, temp4);

     vec_st(blockv, 0, block);

     block += line_size;
     pixels += line_size;
   }

POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
}

int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
    int sum;
    register const vector unsigned char vzero =
                            (const vector unsigned char)vec_splat_u8(0);
    register vector signed short temp0, temp1, temp2, temp3, temp4,
                                 temp5, temp6, temp7;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
  {
    register const vector signed short vprod1 =(const vector signed short)
                                        AVV( 1,-1, 1,-1, 1,-1, 1,-1);
    register const vector signed short vprod2 =(const vector signed short)
                                        AVV( 1, 1,-1,-1, 1, 1,-1,-1);
    register const vector signed short vprod3 =(const vector signed short)
                                        AVV( 1, 1, 1, 1,-1,-1,-1,-1);
    register const vector unsigned char perm1 = (const vector unsigned char)
      AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
          0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
    register const vector unsigned char perm2 = (const vector unsigned char)
      AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
          0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
    register const vector unsigned char perm3 = (const vector unsigned char)
      AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
          0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);

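/* One iteration reads one row of src and dst (unaligned), widens the first
   8 bytes of each to signed shorts, and runs a three-stage butterfly on the
   difference row: each stage pairs elements with a perm* shuffle and
   combines them via vec_mladd with a +/-1 pattern, so res ends up holding
   the 8-point Hadamard transform of src - dst for that row. */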
#define ONEITERBUTTERFLY(i, res)                                        \
    {                                                                   \
      register vector unsigned char src1, src2, srcO;                   \
      register vector unsigned char dst1, dst2, dstO;                   \
      register vector signed short srcV, dstV;                          \
      register vector signed short but0, but1, but2, op1, op2, op3;     \
      src1 = vec_ld(stride * i, src);                                   \
      src2 = vec_ld((stride * i) + 15, src);                            \
      srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
      dst1 = vec_ld(stride * i, dst);                                   \
      dst2 = vec_ld((stride * i) + 15, dst);                            \
      dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
      /* promote the unsigned chars to signed shorts */                 \
      /* we're in the 8x8 function, we only care for the first 8 */     \
      srcV =                                                            \
        (vector signed short)vec_mergeh((vector signed char)vzero,      \
        (vector signed char)srcO);                                      \
      dstV =                                                            \
        (vector signed short)vec_mergeh((vector signed char)vzero,      \
        (vector signed char)dstO);                                      \
      /* subtractions inside the first butterfly */                     \
      but0 = vec_sub(srcV, dstV);                                       \
      op1 = vec_perm(but0, but0, perm1);                                \
      but1 = vec_mladd(but0, vprod1, op1);                              \
      op2 = vec_perm(but1, but1, perm2);                                \
      but2 = vec_mladd(but1, vprod2, op2);                              \
      op3 = vec_perm(but2, but2, perm3);                                \
      res = vec_mladd(but2, vprod3, op3);                               \
    }
    ONEITERBUTTERFLY(0, temp0);
    ONEITERBUTTERFLY(1, temp1);
    ONEITERBUTTERFLY(2, temp2);
    ONEITERBUTTERFLY(3, temp3);
    ONEITERBUTTERFLY(4, temp4);
    ONEITERBUTTERFLY(5, temp5);
    ONEITERBUTTERFLY(6, temp6);
    ONEITERBUTTERFLY(7, temp7);
  }
#undef ONEITERBUTTERFLY
  {
    register vector signed int vsum;
    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
  }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
  return sum;
}

/*
  16x8 works with 16 elements; it avoids replicating loads, and
  gives the compiler more room for scheduling.
  It's only used from inside hadamard8_diff16_altivec.

  Unfortunately, it seems gcc-3.3 is a bit dumb, and
  the compiled code has a LOT of spill code; it seems
  gcc (unlike xlc) cannot keep everything in registers
  by itself. The following code includes hand-made
  register allocation. It's not clean, but on
  a 7450 the resulting code is much faster (the best case
  falls from 700+ cycles to 550).

  xlc doesn't add spill code, but it doesn't know how to
  schedule for the 7450, and its code isn't much faster than
  gcc-3.3's on the 7450 (but uses 25% fewer instructions...)

  On the 970, the hand-made RA is still a win (around 690
  vs. around 780), but xlc goes to around 660 on the
  regular C code...
*/

static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226
    int sum;
    register vector signed short
        temp0 REG_v(v0),
        temp1 REG_v(v1),
        temp2 REG_v(v2),
        temp3 REG_v(v3),
        temp4 REG_v(v4),
        temp5 REG_v(v5),
        temp6 REG_v(v6),
        temp7 REG_v(v7);
    register vector signed short
        temp0S REG_v(v8),
        temp1S REG_v(v9),
        temp2S REG_v(v10),
        temp3S REG_v(v11),
        temp4S REG_v(v12),
        temp5S REG_v(v13),
        temp6S REG_v(v14),
        temp7S REG_v(v15);
1227 1228
    register const vector unsigned char vzero REG_v(v31)=
        (const vector unsigned char)vec_splat_u8(0);
1229
  {
1230 1231 1232 1233 1234 1235 1236 1237
    register const vector signed short vprod1 REG_v(v16)=
        (const vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
    register const vector signed short vprod2 REG_v(v17)=
        (const vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
    register const vector signed short vprod3 REG_v(v18)=
        (const vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
    register const vector unsigned char perm1 REG_v(v19)=
        (const vector unsigned char)
1238 1239
        AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
            0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
1240 1241
    register const vector unsigned char perm2 REG_v(v20)=
        (const vector unsigned char)
1242 1243
        AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
            0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
1244 1245
    register const vector unsigned char perm3 REG_v(v21)=
        (const vector unsigned char)
1246 1247
        AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
1248

1249 1250
#define ONEITERBUTTERFLY(i, res1, res2)                                 \
    {                                                                   \
1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274
      register vector unsigned char src1 REG_v(v22),                    \
                                    src2 REG_v(v23),                    \
                                    dst1 REG_v(v24),                    \
                                    dst2 REG_v(v25),                    \
                                    srcO REG_v(v22),                    \
                                    dstO REG_v(v23);                    \
                                                                        \
      register vector signed short  srcV REG_v(v24),                    \
                                    dstV REG_v(v25),                    \
                                    srcW REG_v(v26),                    \
                                    dstW REG_v(v27),                    \
                                    but0 REG_v(v28),                    \
                                    but0S REG_v(v29),                   \
                                    op1 REG_v(v30),                     \
                                    but1 REG_v(v22),                    \
                                    op1S REG_v(v23),                    \
                                    but1S REG_v(v24),                   \
                                    op2 REG_v(v25),                     \
                                    but2 REG_v(v26),                    \
                                    op2S REG_v(v27),                    \
                                    but2S REG_v(v28),                   \
                                    op3 REG_v(v29),                     \
                                    op3S REG_v(v30);                    \
                                                                        \
1275 1276
      src1 = vec_ld(stride * i, src);                                   \
      src2 = vec_ld((stride * i) + 16, src);                            \
1277
      srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
1278 1279
      dst1 = vec_ld(stride * i, dst);                                   \
      dst2 = vec_ld((stride * i) + 16, dst);                            \
1280
      dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
1281
      /* promote the unsigned chars to signed shorts */                 \
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293
      srcV =                                                            \
        (vector signed short)vec_mergeh((vector signed char)vzero,      \
        (vector signed char)srcO);                                      \
      dstV =                                                            \
        (vector signed short)vec_mergeh((vector signed char)vzero,      \
        (vector signed char)dstO);                                      \
      srcW =                                                            \
        (vector signed short)vec_mergel((vector signed char)vzero,      \
        (vector signed char)srcO);                                      \
      dstW =                                                            \
        (vector signed short)vec_mergel((vector signed char)vzero,      \
        (vector signed char)dstO);                                      \
1294
      /* substractions inside the first butterfly */                    \
1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305
      but0 = vec_sub(srcV, dstV);                                       \
      but0S = vec_sub(srcW, dstW);                                      \
      op1 = vec_perm(but0, but0, perm1);                                \
      but1 = vec_mladd(but0, vprod1, op1);                              \
      op1S = vec_perm(but0S, but0S, perm1);                             \
      but1S = vec_mladd(but0S, vprod1, op1S);                           \
      op2 = vec_perm(but1, but1, perm2);                                \
      but2 = vec_mladd(but1, vprod2, op2);                              \
      op2S = vec_perm(but1S, but1S, perm2);                             \
      but2S = vec_mladd(but1S, vprod2, op2S);                           \
      op3 = vec_perm(but2, but2, perm3);                                \
1306
      res1 = vec_mladd(but2, vprod3, op3);                              \
1307
      op3S = vec_perm(but2S, but2S, perm3);                             \
1308
      res2 = vec_mladd(but2S, vprod3, op3S);                            \
1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321
    }
    ONEITERBUTTERFLY(0, temp0, temp0S);
    ONEITERBUTTERFLY(1, temp1, temp1S);
    ONEITERBUTTERFLY(2, temp2, temp2S);
    ONEITERBUTTERFLY(3, temp3, temp3S);
    ONEITERBUTTERFLY(4, temp4, temp4S);
    ONEITERBUTTERFLY(5, temp5, temp5S);
    ONEITERBUTTERFLY(6, temp6, temp6S);
    ONEITERBUTTERFLY(7, temp7, temp7S);
  }
#undef ONEITERBUTTERFLY
  {
    register vector signed int vsum;
    register vector signed short line0S, line1S, line2S, line3S, line4S,
                                 line5S, line6S, line7S, line0BS,line2BS,
                                 line1BS,line3BS,line4BS,line6BS,line5BS,
                                 line7BS,line0CS,line4CS,line1CS,line5CS,
                                 line2CS,line6CS,line3CS,line7CS;

    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);

    line0S = vec_add(temp0S, temp1S);
    line1S = vec_sub(temp0S, temp1S);
    line2S = vec_add(temp2S, temp3S);
    line3S = vec_sub(temp2S, temp3S);
    line4S = vec_add(temp4S, temp5S);
    line5S = vec_sub(temp4S, temp5S);
    line6S = vec_add(temp6S, temp7S);
    line7S = vec_sub(temp6S, temp7S);

    line0BS = vec_add(line0S, line2S);
    line2BS = vec_sub(line0S, line2S);
    line1BS = vec_add(line1S, line3S);
    line3BS = vec_sub(line1S, line3S);
    line4BS = vec_add(line4S, line6S);
    line6BS = vec_sub(line4S, line6S);
    line5BS = vec_add(line5S, line7S);
    line7BS = vec_sub(line5S, line7S);

    line0CS = vec_add(line0BS, line4BS);
    line4CS = vec_sub(line0BS, line4BS);
    line1CS = vec_add(line1BS, line5BS);
    line5CS = vec_sub(line1BS, line5BS);
    line2CS = vec_add(line2BS, line6BS);
    line6CS = vec_sub(line2BS, line6BS);
    line3CS = vec_add(line3BS, line7BS);
    line7CS = vec_sub(line3BS, line7BS);

    vsum = vec_sum4s(vec_abs(line0CS), vsum);
    vsum = vec_sum4s(vec_abs(line1CS), vsum);
    vsum = vec_sum4s(vec_abs(line2CS), vsum);
    vsum = vec_sum4s(vec_abs(line3CS), vsum);
    vsum = vec_sum4s(vec_abs(line4CS), vsum);
    vsum = vec_sum4s(vec_abs(line5CS), vsum);
    vsum = vec_sum4s(vec_abs(line6CS), vsum);
    vsum = vec_sum4s(vec_abs(line7CS), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
  }
  return sum;
}
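
/* For reference, a minimal scalar sketch (illustrative only, kept out of
 * the build) of what the vector code above computes per 8x8 block: the sum
 * of absolute values of the 2-D Hadamard transform of the difference
 * block.  The three in-place butterfly stages per direction correspond to
 * the ONEITERBUTTERFLY permutes (horizontal pass) and to the
 * lineN/lineNB/lineNC adds and subs (vertical pass); output ordering and
 * the sign convention of the difference do not affect the absolute sum. */
#if 0
static int hadamard8_diff_scalar_sketch(uint8_t *dst, uint8_t *src, int stride)
{
    int i, j, k, step, sum = 0;
    int m[8][8];

    /* difference block */
    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
            m[i][j] = dst[i*stride + j] - src[i*stride + j];

    /* three butterfly stages along each row */
    for (i = 0; i < 8; i++)
        for (step = 1; step < 8; step <<= 1)
            for (j = 0; j < 8; j += 2*step)
                for (k = j; k < j + step; k++) {
                    int a = m[i][k], b = m[i][k+step];
                    m[i][k]      = a + b;
                    m[i][k+step] = a - b;
                }

    /* the same three stages along each column */
    for (j = 0; j < 8; j++)
        for (step = 1; step < 8; step <<= 1)
            for (i = 0; i < 8; i += 2*step)
                for (k = i; k < i + step; k++) {
                    int a = m[k][j], b = m[k+step][j];
                    m[k][j]      = a + b;
                    m[k+step][j] = a - b;
                }

    for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
            sum += m[i][j] < 0 ? -m[i][j] : m[i][j];
    return sum;
}
#endif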

int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
    int score;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
    score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    if (h==16) {
        dst += 8*stride;
        src += 8*stride;
        score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
    }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
    return score;
}

int has_altivec(void)
{
#ifdef __AMIGAOS4__
    ULONG result = 0;
    extern struct ExecIFace *IExec;

    IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
    if (result == VECTORTYPE_ALTIVEC) return 1;
    return 0;
#elif __APPLE__
    int sels[2] = {CTL_HW, HW_VECTORUNIT};
    int has_vu = 0;
    size_t len = sizeof(has_vu);
    int err;

    err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

    if (err == 0) return (has_vu != 0);
    return 0;
#else
/* Do it the brute-force way, borrowed from the libmpeg2 library. */
    {
      signal (SIGILL, sigill_handler);
      if (sigsetjmp (jmpbuf, 1)) {
        signal (SIGILL, SIG_DFL);
      } else {
        canjump = 1;

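        /* execute a trivial AltiVec instruction: "mtspr 256" loads the
           VRSAVE register and the vand is a vector no-op; on a CPU
           without a vector unit this raises SIGILL, and the handler
           installed above longjmps back to the sigsetjmp() branch */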
        asm volatile ("mtspr 256, %0\n\t"
                      "vand %%v0, %%v0, %%v0"
                      :
                      : "r" (-1));

        signal (SIGILL, SIG_DFL);
        return 1;
      }
    }
    return 0;
#endif /* __AMIGAOS4__ */
}
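
/* Illustrative only: the probe above is meant to gate the AltiVec setup at
 * init time, roughly as follows (the actual dispatch lives in the generic
 * PPC init code, not in this file):
 *
 *     if (has_altivec())
 *         dsputil_init_altivec(c, avctx);
 */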

static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a;
    vector bool int t0, t1;
    /* splat-build the constant 31: vec_splat_u32() only takes
       immediates in the range -16..15 */
    const vector unsigned int v_31 =
        vec_add(vec_add(vec_splat_u32(15), vec_splat_u32(15)), vec_splat_u32(1));
    for(i=0; i<blocksize; i+=4) {
        m = vec_ld(0, mag+i);
        a = vec_ld(0, ang+i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        t0 = (vector bool int)vec_and(a, t1);
        t1 = (vector bool int)vec_andc(a, t1);
        a = vec_sub(m, (vector float)t1);
        m = vec_add(m, (vector float)t0);
        vec_stl(a, 0, ang+i);
        vec_stl(m, 0, mag+i);
    }
}
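
/* A scalar sketch (illustrative only, kept out of the build) of the
 * magnitude/angle decoupling that the vector code above performs with
 * sign masks instead of branches: */
#if 0
static void vorbis_inverse_coupling_scalar_sketch(float *mag, float *ang,
                                                  int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] += t;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] -= t;
            }
        }
    }
}
#endif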

/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
    register int i;
    register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
    register vector unsigned char blockv, temp1, temp2, blocktemp;
    register vector unsigned short pixelssum1, pixelssum2, temp3;

    register const vector unsigned char vczero = (const vector unsigned char)
                                        vec_splat_u8(0);
    register const vector unsigned short vctwo = (const vector unsigned short)
                                        vec_splat_u16(2);

    temp1 = vec_ld(0, pixels);
    temp2 = vec_ld(16, pixels);
    pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
    if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
        pixelsv2 = temp2;
    } else {
        pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
    }
    pixelsv1 = vec_mergeh(vczero, pixelsv1);
    pixelsv2 = vec_mergeh(vczero, pixelsv2);
    pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                         (vector unsigned short)pixelsv2);
    pixelssum1 = vec_add(pixelssum1, vctwo);

POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
    for (i = 0; i < h ; i++) {
        int rightside = ((unsigned long)block & 0x0000000F);
        blockv = vec_ld(0, block);

        temp1 = vec_ld(line_size, pixels);
        temp2 = vec_ld(line_size + 16, pixels);
        pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
        if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
            pixelsv2 = temp2;
        } else {
            pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
        }

        pixelsv1 = vec_mergeh(vczero, pixelsv1);
        pixelsv2 = vec_mergeh(vczero, pixelsv2);
        pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                             (vector unsigned short)pixelsv2);
        temp3 = vec_add(pixelssum1, pixelssum2);
        temp3 = vec_sra(temp3, vctwo);
        pixelssum1 = vec_add(pixelssum2, vctwo);
        pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

        if (rightside) {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
        } else {
            blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
        }

        blockv = vec_avg(blocktemp, blockv);
        vec_st(blockv, 0, block);

        block += line_size;
        pixels += line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
}
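
/* A scalar sketch (illustrative only, kept out of the build) of the same
 * operation: half-pel interpolation in both directions with rounding,
 * followed by a rounded average with the existing destination pixel: */
#if 0
static void avg_pixels8_xy2_scalar_sketch(uint8_t *block, const uint8_t *pixels,
                                          int line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++) {
            int p = (pixels[j]             + pixels[j + 1] +
                     pixels[j + line_size] + pixels[j + line_size + 1] + 2) >> 2;
            block[j] = (block[j] + p + 1) >> 1;
        }
        pixels += line_size;
        block  += line_size;
    }
}
#endif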

void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->pix_abs[0][1] = sad16_x2_altivec;
    c->pix_abs[0][2] = sad16_y2_altivec;
    c->pix_abs[0][3] = sad16_xy2_altivec;
    c->pix_abs[0][0] = sad16_altivec;
    c->pix_abs[1][0] = sad8_altivec;
    c->sad[0]= sad16_altivec;
    c->sad[1]= sad8_altivec;
    c->pix_norm1 = pix_norm1_altivec;
    c->sse[1]= sse8_altivec;
    c->sse[0]= sse16_altivec;
    c->pix_sum = pix_sum_altivec;
    c->diff_pixels = diff_pixels_altivec;
    c->get_pixels = get_pixels_altivec;
    c->add_bytes= add_bytes_altivec;
    c->put_pixels_tab[0][0] = put_pixels16_altivec;
    /* the two functions do the same thing, so use the same code */
    c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
    c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
    c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
    c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
    c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
    c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
    c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
    c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;

    c->hadamard8_diff[0] = hadamard8_diff16_altivec;
    c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
#ifdef CONFIG_VORBIS_DECODER
    c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
#endif
}