/*
 * Copyright (c) 2002 Brian Foley
 * Copyright (c) 2002 Dieter Shirley
 * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"
#ifdef CONFIG_DARWIN
#include <sys/sysctl.h>
#else /* CONFIG_DARWIN */
#ifdef __AMIGAOS4__
#include <exec/exec.h>
#include <interfaces/exec.h>
#include <proto/exec.h>
#else /* __AMIGAOS4__ */
#include <signal.h>
#include <setjmp.h>

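/* SIGILL-based probe for runtime AltiVec detection: an AltiVec
   instruction is tried with this handler installed, and siglongjmp
   brings us back here if it traps. */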
static sigjmp_buf jmpbuf;
static volatile sig_atomic_t canjump = 0;

static void sigill_handler (int sig)
{
    if (!canjump) {
        signal (sig, SIG_DFL);
        raise (sig);
    }
    canjump = 0;
    siglongjmp (jmpbuf, 1);
}
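
/* A minimal sketch of how the handler above is typically used to probe
   for AltiVec (the classic libmpeg2-style detection):

     signal (SIGILL, sigill_handler);
     if (sigsetjmp (jmpbuf, 1)) {
         signal (SIGILL, SIG_DFL);            // trapped: no AltiVec
     } else {
         canjump = 1;
         asm volatile ("mtspr 256, %0\n\t"
                       "vand %%v0, %%v0, %%v0" // any AltiVec insn will do
                       :
                       : "r" (-1));
         signal (SIGILL, SIG_DFL);            // survived: AltiVec present
     }
*/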
#endif /* __AMIGAOS4__ */
#endif /* CONFIG_DARWIN */

int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);
    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix2v: pix2[0]-pix2[15]      pix2iv: pix2[1]-pix2[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));
        tv = (vector unsigned char *) &pix2[0];
        pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

        tv = (vector unsigned char *) &pix2[1];
        pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix2iv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);
        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
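    /* (vec_sums leaves the total in element 3; splatting it lets
       vec_ste store that single word into the aligned int s) */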
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector unsigned char *tv;
    vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    uint8_t *pix3 = pix2 + line_size;

    s = 0;
    sad = (vector unsigned int)vec_splat_u32(0);

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, each
       time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        /* Calculate the average vector */
        avgv = vec_avg(pix2v, pix3v);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);
        pix1 += line_size;
        pix2v = pix3v;
        pix3 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    uint8_t *pix3 = pix2 + line_size;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    const_vector unsigned short two = (const_vector unsigned short)vec_splat_u16(2);
    vector unsigned char *tv, avgv, t5;
    vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
    vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
    vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
    vector unsigned short avghv, avglv;
    vector unsigned short t1, t2, t3, t4;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);
    s = 0;

    /*
       Due to the fact that pix3 = pix2 + line_size, the pix3 of one
       iteration becomes pix2 in the next iteration. We can use this
       fact to avoid a potentially expensive unaligned read, as well
       as some splitting, and vector addition each time around the loop.
       Read unaligned pixels into our vectors. The vectors are as follows:
       pix2v: pix2[0]-pix2[15]  pix2iv: pix2[1]-pix2[16]
       Split the pixel vectors into shorts
    */
    tv = (vector unsigned char *) &pix2[0];
    pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));

    tv = (vector unsigned char *) &pix2[1];
    pix2iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[1]));

    pix2hv = (vector unsigned short) vec_mergeh(zero, pix2v);
    pix2lv = (vector unsigned short) vec_mergel(zero, pix2v);
    pix2ihv = (vector unsigned short) vec_mergeh(zero, pix2iv);
    pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv);
    t1 = vec_add(pix2hv, pix2ihv);
    t2 = vec_add(pix2lv, pix2ilv);
    for(i=0;i<h;i++) {
        /*
           Read unaligned pixels into our vectors. The vectors are as follows:
           pix1v: pix1[0]-pix1[15]
           pix3v: pix3[0]-pix3[15]      pix3iv: pix3[1]-pix3[16]
        */
        tv = (vector unsigned char *) pix1;
        pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1));

        tv = (vector unsigned char *) &pix3[0];
        pix3v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[0]));

        tv = (vector unsigned char *) &pix3[1];
        pix3iv = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix3[1]));

        /*
          Note that Altivec does have vec_avg, but this works on vector pairs
          and rounds up. We could do avg(avg(a,b),avg(c,d)), but the rounding
          would mean that, for example, avg(3,0,0,1) = 2, when it should be 1.
          Instead, we have to split the pixel vectors into vectors of shorts,
          and do the averaging by hand.
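          For example, with vec_avg computing (a+b+1)>>1:
          avg(avg(3,0), avg(0,1)) = avg(2,1) = 2, while the correctly
          rounded average is (3+0+0+1+2)>>2 = 1.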
        */

        /* Split the pixel vectors into shorts */
        pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
        pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
        pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
        pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);

        /* Do the averaging on them */
        t3 = vec_add(pix3hv, pix3ihv);
        t4 = vec_add(pix3lv, pix3ilv);

        avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
        avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);

        /* Pack the shorts back into a result */
        avgv = vec_pack(avghv, avglv);

        /* Calculate a sum of abs differences vector */
        t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix3 += line_size;
        /* Transfer the calculated values for pix3 into pix2 */
        t1 = t3;
        t2 = t4;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3,t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;
    sad = (vector unsigned int)vec_splat_u32(0);


    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);
        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);
        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3,t4, t5;
    vector unsigned int sad;
    vector signed int sumdiffs;

    sad = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /* Calculate a sum of abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);

    return s;
}

int pix_norm1_altivec(uint8_t *pix, int line_size)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char *tv;
    vector unsigned char pixv;
    vector unsigned int sv;
    vector signed int sum;
    sv = (vector unsigned int)vec_splat_u32(0);
    s = 0;
    for (i = 0; i < 16; i++) {
        /* Read in the potentially unaligned pixels */
        tv = (vector unsigned char *) pix;
        pixv = vec_perm(tv[0], tv[1], vec_lvsl(0, pix));

        /* Square the values, and add them to our sum */
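        /* (vec_msum multiplies the 16 unsigned bytes pairwise and adds
           each group of four products into a 32-bit lane of sv) */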
        sv = vec_msum(pixv, pixv, sv);

        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sum = vec_sums((vector signed int) sv, (vector signed int) zero);
    sum = vec_splat(sum, 3);
    vec_ste(sum, 0, &s);

    return s;
}

/**
 * Sum of Squared Errors for an 8x8 block.
 * AltiVec-enhanced.
 * It's the sad8_altivec code above w/ squaring added.
 */
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3,t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;
    sum = (vector unsigned int)vec_splat_u32(0);

    permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2
           Since we're reading 16 pixels, and actually only want 8,
           mask out the last 8 pixels. The 0s don't change the sum. */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);

        /*
          Since we want to use unsigned chars, we can take advantage
          of the fact that abs(a-b)^2 = (a-b)^2.
        */
        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);
        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);
        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);
    return s;
}

/**
 * Sum of Squared Errors for a 16x16 block.
 * AltiVec-enhanced.
 * It's the sad16_altivec code above w/ squaring added.
 */
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int i;
    int s __attribute__((aligned(16)));
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm1, perm2, *pix1v, *pix2v;
    vector unsigned char t1, t2, t3,t4, t5;
    vector unsigned int sum;
    vector signed int sumsqr;
    sum = (vector unsigned int)vec_splat_u32(0);
    for(i=0;i<h;i++) {
        /* Read potentially unaligned pixels into t1 and t2 */
        perm1 = vec_lvsl(0, pix1);
        pix1v = (vector unsigned char *) pix1;
        perm2 = vec_lvsl(0, pix2);
        pix2v = (vector unsigned char *) pix2;
        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
        t2 = vec_perm(pix2v[0], pix2v[1], perm2);

        /*
          Since we want to use unsigned chars, we can take advantage
          of the fact that abs(a-b)^2 = (a-b)^2.
        */
        /* Calculate abs differences vector */
        t3 = vec_max(t1, t2);
        t4 = vec_min(t1, t2);
        t5 = vec_sub(t3, t4);
        /* Square the values and add them to our sum */
        sum = vec_msum(t5, t5, sum);
        pix1 += line_size;
        pix2 += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
    sumsqr = vec_splat(sumsqr, 3);
    vec_ste(sumsqr, 0, &s);
    return s;
}
int pix_sum_altivec(uint8_t * pix, int line_size)
{
    const_vector unsigned int zero = (const_vector unsigned int)vec_splat_u32(0);
    vector unsigned char perm, *pixv;
    vector unsigned char t1;
    vector unsigned int sad;
    vector signed int sumdiffs;

    int i;
    int s __attribute__((aligned(16)));
    sad = (vector unsigned int)vec_splat_u32(0);
    for (i = 0; i < 16; i++) {
        /* Read the potentially unaligned 16 pixels into t1 */
        perm = vec_lvsl(0, pix);
        pixv = (vector unsigned char *) pix;
        t1 = vec_perm(pixv[0], pixv[1], perm);

        /* Add each 4 pixel group together and put 4 results into sad */
        sad = vec_sum4s(t1, sad);
        pix += line_size;
    }
    /* Sum up the four partial sums, and put the result into s */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}

void get_pixels_altivec(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector signed short shorts;

    for(i=0;i<8;i++)
    {
        // Read potentially unaligned pixels.
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, pixels);
        pixv = (vector unsigned char *) pixels;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts = (vector signed short)vec_mergeh(zero, bytes);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts, i*16, (vector signed short*)block);

        pixels += line_size;
    }
}

void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
        const uint8_t *s2, int stride)
{
    int i;
    vector unsigned char perm, bytes, *pixv;
    const_vector unsigned char zero = (const_vector unsigned char)vec_splat_u8(0);
    vector signed short shorts1, shorts2;

    for(i=0;i<4;i++)
    {
        // Read potentially unaligned pixels
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;


        // The code below is a copy of the code above... This is a manual
        // unroll.

        // Read potentially unaligned pixels
        // We're reading 16 pixels, and actually only want 8,
        // but we simply ignore the extras.
        perm = vec_lvsl(0, s1);
        pixv = (vector unsigned char *) s1;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts1 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the same for the second block of pixels
        perm = vec_lvsl(0, s2);
        pixv = (vector unsigned char *) s2;
        bytes = vec_perm(pixv[0], pixv[1], perm);

        // convert the bytes into shorts
        shorts2 = (vector signed short)vec_mergeh(zero, bytes);

        // Do the subtraction
        shorts1 = vec_sub(shorts1, shorts2);

        // save the data to the block, we assume the block is 16-byte aligned
        vec_st(shorts1, 0, (vector signed short*)block);

        s1 += stride;
        s2 += stride;
        block += 8;
    }
}

void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;
    for(i=0; i+7<w; i+=8){
        dst[i+0] += src[i+0];
        dst[i+1] += src[i+1];
        dst[i+2] += src[i+2];
        dst[i+3] += src[i+3];
        dst[i+4] += src[i+4];
        dst[i+5] += src[i+5];
        dst[i+6] += src[i+6];
        dst[i+7] += src[i+7];
    }
    for(; i<w; i++)
        dst[i+0] += src[i+0];
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register int i;
    register vector unsigned char vdst, vsrc;

    /* dst and src are 16 bytes-aligned (guaranteed) */
    /* i is a byte offset and advances 16 bytes per iteration, so the
       scalar tail loop below picks up exactly where this one stops */
    for(i = 0 ; (i + 15) < w ; i += 16)
    {
      vdst = vec_ld(i, (unsigned char*)dst);
      vsrc = vec_ld(i, (unsigned char*)src);
      vdst = vec_add(vsrc, vdst);
      vec_st(vdst, i, (unsigned char*)dst);
    }
    /* if w is not a multiple of 16: add (not just copy) the leftovers */
    for (; (i < w) ; i++)
    {
      dst[i] += src[i];
    }
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;

POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);

    for(i=0; i<h; i++) {
      *((uint32_t*)(block)) = LD32(pixels);
      *((uint32_t*)(block+4)) = LD32(pixels+4);
      *((uint32_t*)(block+8)) = LD32(pixels+8);
      *((uint32_t*)(block+12)) = LD32(pixels+12);
      pixels+=line_size;
      block +=line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2;
    register vector unsigned char pixelsv1B, pixelsv2B;
    register vector unsigned char pixelsv1C, pixelsv2C;
    register vector unsigned char pixelsv1D, pixelsv2D;

    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;
    register int line_size_2 = line_size << 1;
    register int line_size_3 = line_size + line_size_2;
    register int line_size_4 = line_size << 2;

POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
// hand-unrolling the loop by 4 gains about 15%
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
#if 0
    for(i=0; i<h; i++) {
      pixelsv1 = vec_ld(0, (unsigned char*)pixels);
      pixelsv2 = vec_ld(16, (unsigned char*)pixels);
      vec_st(vec_perm(pixelsv1, pixelsv2, perm),
             0, (unsigned char*)block);
      pixels+=line_size;
      block +=line_size;
    }
#else
    for(i=0; i<h; i+=4) {
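      // the second load uses offset 15 rather than 16: it still hits the
      // aligned block holding pixels[15], but avoids reading one byte past
      // the end of the line when pixels is already 16-byte aligned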
      pixelsv1 = vec_ld(0, (unsigned char*)pixels);
      pixelsv2 = vec_ld(15, (unsigned char*)pixels);
      pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
      pixelsv2B = vec_ld(15 + line_size, (unsigned char*)pixels);
      pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
      pixelsv2C = vec_ld(15 + line_size_2, (unsigned char*)pixels);
      pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
      pixelsv2D = vec_ld(15 + line_size_3, (unsigned char*)pixels);
      vec_st(vec_perm(pixelsv1, pixelsv2, perm),
             0, (unsigned char*)block);
      vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
             line_size, (unsigned char*)block);
      vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
             line_size_2, (unsigned char*)block);
      vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
             line_size_3, (unsigned char*)block);
      pixels+=line_size_4;
      block +=line_size_4;
    }
#endif
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* next one assumes that ((line_size % 16) == 0) */
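/* bytewise rounding-up average, using the identity
   (a+b+1)>>1 == (a|b) - ((a^b)>>1); the 0xFEFEFEFE mask keeps bits
   shifted out of one byte from leaking into the byte below.
   E.g. for one byte, a=3, b=4: (3|4) - (((3^4)&0xFE)>>1) = 7 - 3 = 4. */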
#define op_avg(a,b)  a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for(i=0; i<h; i++) {
      op_avg(*((uint32_t*)(block)),LD32(pixels));
      op_avg(*((uint32_t*)(block+4)),LD32(pixels+4));
      op_avg(*((uint32_t*)(block+8)),LD32(pixels+8));
      op_avg(*((uint32_t*)(block+12)),LD32(pixels+12));
      pixels+=line_size;
      block +=line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    register vector unsigned char perm = vec_lvsl(0, pixels);
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);

    for(i=0; i<h; i++) {
      pixelsv1 = vec_ld(0, (unsigned char*)pixels);
      pixelsv2 = vec_ld(16, (unsigned char*)pixels);
      blockv = vec_ld(0, block);
      pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
      blockv = vec_avg(blockv,pixelsv);
      vec_st(blockv, 0, (unsigned char*)block);
      pixels+=line_size;
      block +=line_size;
    }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int i;
POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
    for (i = 0; i < h; i++) {
        *((uint32_t *) (block)) =
            (((*((uint32_t *) (block))) |
              ((((const struct unaligned_32 *) (pixels))->l))) -
             ((((*((uint32_t *) (block))) ^
                ((((const struct unaligned_32 *) (pixels))->
                  l))) & 0xFEFEFEFEUL) >> 1));
        *((uint32_t *) (block + 4)) =
            (((*((uint32_t *) (block + 4))) |
              ((((const struct unaligned_32 *) (pixels + 4))->l))) -
             ((((*((uint32_t *) (block + 4))) ^
                ((((const struct unaligned_32 *) (pixels +
                                                  4))->
                  l))) & 0xFEFEFEFEUL) >> 1));
        pixels += line_size;
        block += line_size;
    }
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
    int i;

POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
   for (i = 0; i < h; i++) {
     /*
       block is 8 bytes-aligned, so we're either in the
       left block (16 bytes-aligned) or in the right block (not)
     */
     int rightside = ((unsigned long)block & 0x0000000F);
     blockv = vec_ld(0, block);
     pixelsv1 = vec_ld(0, (unsigned char*)pixels);
     pixelsv2 = vec_ld(16, (unsigned char*)pixels);
     pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
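     /* vcprm builds a word (32-bit) permute vector: plain indices pick
        words of the first operand, s-prefixed ones words of the second */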
     if (rightside)
     {
       pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
     }
     else
     {
       pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
     }
     blockv = vec_avg(blockv, pixelsv);

     vec_st(blockv, 0, block);
     pixels += line_size;
     block += line_size;
   }
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* next one assumes that ((line_size % 8) == 0) */
void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
    for (j = 0; j < 2; j++) {
      int i;
      const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
      const uint32_t b =
        (((const struct unaligned_32 *) (pixels + 1))->l);
      uint32_t l0 =
        (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
      uint32_t h0 =
        ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
      uint32_t l1, h1;
      pixels += line_size;
      for (i = 0; i < h; i += 2) {
        uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
        h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *) block) =
          h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
        a = (((const struct unaligned_32 *) (pixels))->l);
        b = (((const struct unaligned_32 *) (pixels + 1))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *) block) =
          h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
      } pixels += 4 - line_size * (h + 1);
      block += 4 - line_size * h;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2,
     pixelsavg;
   register vector unsigned char
     blockv, temp1, temp2;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3;
   register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
   register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
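   /* load pixels[0..15] and pixels[1..16]; when pixels sits at offset 15
      of its 16-byte block, pixels+1 is aligned and the second vector is
      exactly temp2 (vec_lvsl(1, pixels) would select the wrong bytes) */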
   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vctwo);

POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
   for (i = 0; i < h ; i++) {
     int rightside = ((unsigned long)block & 0x0000000F);
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);
     pixelssum1 = vec_add(pixelssum2, vctwo);
     pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
     if (rightside)
     {
       blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
     }
     else
     {
       blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
     }
     vec_st(blockv, 0, block);
     block += line_size;
     pixels += line_size;
   }
POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* next one assumes that ((line_size % 8) == 0) */
void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
    for (j = 0; j < 2; j++) {
      int i;
      const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
      const uint32_t b =
        (((const struct unaligned_32 *) (pixels + 1))->l);
      uint32_t l0 =
        (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
      uint32_t h0 =
        ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
      uint32_t l1, h1;
      pixels += line_size;
      for (i = 0; i < h; i += 2) {
        uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
        h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *) block) =
          h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
        a = (((const struct unaligned_32 *) (pixels))->l);
        b = (((const struct unaligned_32 *) (pixels + 1))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *) block) =
          h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
      } pixels += 4 - line_size * (h + 1);
      block += 4 - line_size * h;
    }
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2,
     pixelsavg;
   register vector unsigned char
     blockv, temp1, temp2;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3;
   register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
   register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
   register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
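   /* no-rounding variant: the running sums are biased with 1 (vcone)
      instead of 2, so each output pixel is (a+b+c+d+1)>>2 rather than
      (a+b+c+d+2)>>2 */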
   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vcone);

POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
   for (i = 0; i < h ; i++) {
     int rightside = ((unsigned long)block & 0x0000000F);
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);
     pixelssum1 = vec_add(pixelssum2, vcone);
     pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
     if (rightside)
     {
       blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
     }
     else
     {
       blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
     }
     vec_st(blockv, 0, block);
     block += line_size;
     pixels += line_size;
   }
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
      for (j = 0; j < 4; j++) {
      int i;
      const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
      const uint32_t b =
        (((const struct unaligned_32 *) (pixels + 1))->l);
      uint32_t l0 =
        (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
      uint32_t h0 =
        ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
      uint32_t l1, h1;
      pixels += line_size;
      for (i = 0; i < h; i += 2) {
        uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
        h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *) block) =
          h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
        a = (((const struct unaligned_32 *) (pixels))->l);
        b = (((const struct unaligned_32 *) (pixels + 1))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *) block) =
          h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
      } pixels += 4 - line_size * (h + 1);
      block += 4 - line_size * h;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2, pixelsv3, pixelsv4;
   register vector unsigned char
     blockv, temp1, temp2;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3,
     pixelssum3, pixelssum4, temp4;
1118 1119
   register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
   register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv3 = vec_mergel(vczero, pixelsv1);
   pixelsv4 = vec_mergel(vczero, pixelsv2);
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                        (vector unsigned short)pixelsv4);
   pixelssum3 = vec_add(pixelssum3, vctwo);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vctwo);
   for (i = 0; i < h ; i++) {
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv3 = vec_mergel(vczero, pixelsv1);
     pixelsv4 = vec_mergel(vczero, pixelsv2);
     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);
     pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                          (vector unsigned short)pixelsv4);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp4 = vec_add(pixelssum3, pixelssum4);
     temp4 = vec_sra(temp4, vctwo);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);

     pixelssum3 = vec_add(pixelssum4, vctwo);
     pixelssum1 = vec_add(pixelssum2, vctwo);

     blockv = vec_packsu(temp3, temp4);
     vec_st(blockv, 0, block);
     block += line_size;
     pixels += line_size;
   }
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* next one assumes that ((line_size % 16) == 0) */
void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    int j;
POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
      for (j = 0; j < 4; j++) {
      int i;
      const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
      const uint32_t b =
        (((const struct unaligned_32 *) (pixels + 1))->l);
      uint32_t l0 =
        (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
      uint32_t h0 =
        ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
      uint32_t l1, h1;
      pixels += line_size;
      for (i = 0; i < h; i += 2) {
        uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
        uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
        l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
        h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *) block) =
          h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
        a = (((const struct unaligned_32 *) (pixels))->l);
        b = (((const struct unaligned_32 *) (pixels + 1))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *) block) =
          h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
      } pixels += 4 - line_size * (h + 1);
      block += 4 - line_size * h;
    }

POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2, pixelsv3, pixelsv4;
   register vector unsigned char
     blockv, temp1, temp2;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3,
     pixelssum3, pixelssum4, temp4;
   register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
   register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1);
   register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv3 = vec_mergel(vczero, pixelsv1);
   pixelsv4 = vec_mergel(vczero, pixelsv2);
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum3 = vec_add((vector unsigned short)pixelsv3,
                        (vector unsigned short)pixelsv4);
   pixelssum3 = vec_add(pixelssum3, vcone);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vcone);
   for (i = 0; i < h ; i++) {
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv3 = vec_mergel(vczero, pixelsv1);
     pixelsv4 = vec_mergel(vczero, pixelsv2);
     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);
     pixelssum4 = vec_add((vector unsigned short)pixelsv3,
                          (vector unsigned short)pixelsv4);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp4 = vec_add(pixelssum3, pixelssum4);
     temp4 = vec_sra(temp4, vctwo);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);

     pixelssum3 = vec_add(pixelssum4, vcone);
     pixelssum1 = vec_add(pixelssum2, vcone);

     blockv = vec_packsu(temp3, temp4);
     vec_st(blockv, 0, block);
     block += line_size;
     pixels += line_size;
   }
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
  int sum;
  register const_vector unsigned char vzero = (const_vector unsigned char)vec_splat_u8(0);
  register vector signed short temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
  {
    register const_vector signed short vprod1 = (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
    register const_vector signed short vprod2 = (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
    register const_vector signed short vprod3 = (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
    register const_vector unsigned char perm1 = (const_vector unsigned char)
      AVV(0x02, 0x03, 0x00, 0x01,
       0x06, 0x07, 0x04, 0x05,
       0x0A, 0x0B, 0x08, 0x09,
       0x0E, 0x0F, 0x0C, 0x0D);
    register const_vector unsigned char perm2 = (const_vector unsigned char)
      AVV(0x04, 0x05, 0x06, 0x07,
       0x00, 0x01, 0x02, 0x03,
       0x0C, 0x0D, 0x0E, 0x0F,
       0x08, 0x09, 0x0A, 0x0B);
    register const_vector unsigned char perm3 = (const_vector unsigned char)
      AVV(0x08, 0x09, 0x0A, 0x0B,
       0x0C, 0x0D, 0x0E, 0x0F,
       0x00, 0x01, 0x02, 0x03,
       0x04, 0x05, 0x06, 0x07);

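    /* ONEITERBUTTERFLY loads one row each of src and dst, promotes the
       first 8 bytes to signed shorts, takes their difference, and applies
       a horizontal 8-point Hadamard transform: three butterfly stages,
       each a permute (to pair up elements) followed by a multiply-add
       with a +/-1 pattern. */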
#define ONEITERBUTTERFLY(i, res)                                        \
    {                                                                   \
      register vector unsigned char src1, src2, srcO;                   \
      register vector unsigned char dst1, dst2, dstO;                   \
      register vector signed short srcV, dstV;                          \
      register vector signed short but0, but1, but2, op1, op2, op3;     \
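      /* load the second vector only when the eight useful bytes         \
         actually straddle a 16-byte boundary (offset in block > 8) */   \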
      src1 = vec_ld(stride * i, src);                                   \
      if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8)       \
        src2 = vec_ld((stride * i) + 16, src);                          \
      srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
      dst1 = vec_ld(stride * i, dst);                                   \
      if ((((stride * i) + (unsigned long)dst) & 0x0000000F) > 8)       \
        dst2 = vec_ld((stride * i) + 16, dst);                          \
      dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
      /* promote the unsigned chars to signed shorts */                 \
      /* we're in the 8x8 function, we only care for the first 8 */     \
      srcV =                                                            \
        (vector signed short)vec_mergeh((vector signed char)vzero,      \
        (vector signed char)srcO);                                      \
      dstV =                                                            \
        (vector signed short)vec_mergeh((vector signed char)vzero,      \
        (vector signed char)dstO);                                      \
      /* subtractions inside the first butterfly */                     \
      but0 = vec_sub(srcV, dstV);                                       \
      op1 = vec_perm(but0, but0, perm1);                                \
      but1 = vec_mladd(but0, vprod1, op1);                              \
      op2 = vec_perm(but1, but1, perm2);                                \
      but2 = vec_mladd(but1, vprod2, op2);                              \
      op3 = vec_perm(but2, but2, perm3);                                \
      res = vec_mladd(but2, vprod3, op3);                               \
    }
    ONEITERBUTTERFLY(0, temp0);
    ONEITERBUTTERFLY(1, temp1);
    ONEITERBUTTERFLY(2, temp2);
    ONEITERBUTTERFLY(3, temp3);
    ONEITERBUTTERFLY(4, temp4);
    ONEITERBUTTERFLY(5, temp5);
    ONEITERBUTTERFLY(6, temp6);
    ONEITERBUTTERFLY(7, temp7);
  }
#undef ONEITERBUTTERFLY
  {
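    /* vertical 8-point Hadamard pass across the eight transformed rows
       (three more butterfly stages), then sum the absolute values of
       all 64 coefficients */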
    register vector signed int vsum;
    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
  }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
  return sum;
}
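
/* Note on the structure above: the three vec_perm/vec_mladd stages in
   ONEITERBUTTERFLY perform 1-, 2- and 4-apart butterflies within each
   row (a horizontal 8-point Hadamard transform of src - dst); the
   lineN/lineNB/lineNC adds and subs then apply the same three butterfly
   stages across rows, and the final vec_sum4s/vec_sums pass accumulates
   the sum of absolute transformed differences (SATD). */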

/*
  16x8 works with 16 elements; it allows us to avoid replicating
  loads, and gives the compiler more room for scheduling.
  It's only used from inside hadamard8_diff16_altivec.

  Unfortunately, it seems gcc-3.3 is a bit dumb, and
  the compiled code has a LOT of spill code; it seems
  gcc (unlike xlc) cannot keep everything in registers
  by itself. The following code includes hand-made
  register allocation. It's not clean, but on
  a 7450 the resulting code is much faster (best case
  falls from 700+ cycles to 550).

  xlc doesn't add spill code, but it doesn't know how to
  schedule for the 7450, and its code isn't much faster than
  gcc-3.3's on the 7450 (but uses 25% fewer instructions...).

  On the 970, the hand-made RA is still a win (around 690
  vs. around 780), but xlc goes to around 660 on the
  regular C code...
*/
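
/* Hedged sketch of the hand-made register allocation: REG_v presumably
   wraps gcc's explicit register variable syntax (its real definition
   lives in gcc_fixes.h and may differ), along the lines of

       #define REG_v(a)  asm ( #a )

   so that

       register vector signed short temp0 REG_v(v0);

   expands to

       register vector signed short temp0 asm ("v0");

   pinning temp0 to AltiVec register v0 so gcc cannot spill it. */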

static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
  int sum;
  register vector signed short
    temp0 REG_v(v0),
    temp1 REG_v(v1),
    temp2 REG_v(v2),
    temp3 REG_v(v3),
    temp4 REG_v(v4),
    temp5 REG_v(v5),
    temp6 REG_v(v6),
    temp7 REG_v(v7);
  register vector signed short
    temp0S REG_v(v8),
    temp1S REG_v(v9),
    temp2S REG_v(v10),
    temp3S REG_v(v11),
    temp4S REG_v(v12),
    temp5S REG_v(v13),
    temp6S REG_v(v14),
    temp7S REG_v(v15);
  register const_vector unsigned char vzero REG_v(v31)= (const_vector unsigned char)vec_splat_u8(0);
  {
    register const_vector signed short vprod1 REG_v(v16)= (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
    register const_vector signed short vprod2 REG_v(v17)= (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
    register const_vector signed short vprod3 REG_v(v18)= (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
    register const_vector unsigned char perm1 REG_v(v19)= (const_vector unsigned char)
      AVV(0x02, 0x03, 0x00, 0x01,
       0x06, 0x07, 0x04, 0x05,
       0x0A, 0x0B, 0x08, 0x09,
       0x0E, 0x0F, 0x0C, 0x0D);
    register const_vector unsigned char perm2 REG_v(v20)= (const_vector unsigned char)
      AVV(0x04, 0x05, 0x06, 0x07,
       0x00, 0x01, 0x02, 0x03,
       0x0C, 0x0D, 0x0E, 0x0F,
       0x08, 0x09, 0x0A, 0x0B);
    register const_vector unsigned char perm3 REG_v(v21)= (const_vector unsigned char)
      AVV(0x08, 0x09, 0x0A, 0x0B,
       0x0C, 0x0D, 0x0E, 0x0F,
       0x00, 0x01, 0x02, 0x03,
       0x04, 0x05, 0x06, 0x07);

#define ONEITERBUTTERFLY(i, res1, res2)                                 \
    {                                                                   \
      /* registers are deliberately reused as earlier values die */     \
      register vector unsigned char src1 REG_v(v22),                    \
                                    src2 REG_v(v23),                    \
                                    dst1 REG_v(v24),                    \
                                    dst2 REG_v(v25),                    \
                                    srcO REG_v(v22),                    \
                                    dstO REG_v(v23);                    \
                                                                        \
      register vector signed short  srcV REG_v(v24),                    \
                                    dstV REG_v(v25),                    \
                                    srcW REG_v(v26),                    \
                                    dstW REG_v(v27),                    \
                                    but0 REG_v(v28),                    \
                                    but0S REG_v(v29),                   \
                                    op1 REG_v(v30),                     \
                                    but1 REG_v(v22),                    \
                                    op1S REG_v(v23),                    \
                                    but1S REG_v(v24),                   \
                                    op2 REG_v(v25),                     \
                                    but2 REG_v(v26),                    \
                                    op2S REG_v(v27),                    \
                                    but2S REG_v(v28),                   \
                                    op3 REG_v(v29),                     \
                                    op3S REG_v(v30);                    \
                                                                        \
      src1 = vec_ld(stride * i, src);                                   \
      src2 = vec_ld((stride * i) + 16, src);                            \
      srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src));           \
      dst1 = vec_ld(stride * i, dst);                                   \
      dst2 = vec_ld((stride * i) + 16, dst);                            \
      dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst));           \
      /* promote the unsigned chars to signed shorts */                 \
      srcV =                                                            \
        (vector signed short)vec_mergeh((vector signed char)vzero,      \
        (vector signed char)srcO);                                      \
      dstV =                                                            \
        (vector signed short)vec_mergeh((vector signed char)vzero,      \
        (vector signed char)dstO);                                      \
      srcW =                                                            \
        (vector signed short)vec_mergel((vector signed char)vzero,      \
        (vector signed char)srcO);                                      \
      dstW =                                                            \
        (vector signed short)vec_mergel((vector signed char)vzero,      \
        (vector signed char)dstO);                                      \
      /* subtractions inside the first butterfly */                     \
      but0 = vec_sub(srcV, dstV);                                       \
      but0S = vec_sub(srcW, dstW);                                      \
      op1 = vec_perm(but0, but0, perm1);                                \
      but1 = vec_mladd(but0, vprod1, op1);                              \
      op1S = vec_perm(but0S, but0S, perm1);                             \
      but1S = vec_mladd(but0S, vprod1, op1S);                           \
      op2 = vec_perm(but1, but1, perm2);                                \
      but2 = vec_mladd(but1, vprod2, op2);                              \
      op2S = vec_perm(but1S, but1S, perm2);                             \
      but2S = vec_mladd(but1S, vprod2, op2S);                           \
      op3 = vec_perm(but2, but2, perm3);                                \
      res1 = vec_mladd(but2, vprod3, op3);                              \
      op3S = vec_perm(but2S, but2S, perm3);                             \
      res2 = vec_mladd(but2S, vprod3, op3S);                            \
    }
    ONEITERBUTTERFLY(0, temp0, temp0S);
    ONEITERBUTTERFLY(1, temp1, temp1S);
    ONEITERBUTTERFLY(2, temp2, temp2S);
    ONEITERBUTTERFLY(3, temp3, temp3S);
    ONEITERBUTTERFLY(4, temp4, temp4S);
    ONEITERBUTTERFLY(5, temp5, temp5S);
    ONEITERBUTTERFLY(6, temp6, temp6S);
    ONEITERBUTTERFLY(7, temp7, temp7S);
  }
#undef ONEITERBUTTERFLY
  {
    register vector signed int vsum;
    register vector signed short line0S, line1S, line2S, line3S, line4S,
                                 line5S, line6S, line7S, line0BS,line2BS,
                                 line1BS,line3BS,line4BS,line6BS,line5BS,
                                 line7BS,line0CS,line4CS,line1CS,line5CS,
                                 line2CS,line6CS,line3CS,line7CS;

    register vector signed short line0 = vec_add(temp0, temp1);
    register vector signed short line1 = vec_sub(temp0, temp1);
    register vector signed short line2 = vec_add(temp2, temp3);
    register vector signed short line3 = vec_sub(temp2, temp3);
    register vector signed short line4 = vec_add(temp4, temp5);
    register vector signed short line5 = vec_sub(temp4, temp5);
    register vector signed short line6 = vec_add(temp6, temp7);
    register vector signed short line7 = vec_sub(temp6, temp7);

    register vector signed short line0B = vec_add(line0, line2);
    register vector signed short line2B = vec_sub(line0, line2);
    register vector signed short line1B = vec_add(line1, line3);
    register vector signed short line3B = vec_sub(line1, line3);
    register vector signed short line4B = vec_add(line4, line6);
    register vector signed short line6B = vec_sub(line4, line6);
    register vector signed short line5B = vec_add(line5, line7);
    register vector signed short line7B = vec_sub(line5, line7);

    register vector signed short line0C = vec_add(line0B, line4B);
    register vector signed short line4C = vec_sub(line0B, line4B);
    register vector signed short line1C = vec_add(line1B, line5B);
    register vector signed short line5C = vec_sub(line1B, line5B);
    register vector signed short line2C = vec_add(line2B, line6B);
    register vector signed short line6C = vec_sub(line2B, line6B);
    register vector signed short line3C = vec_add(line3B, line7B);
    register vector signed short line7C = vec_sub(line3B, line7B);

    vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
    vsum = vec_sum4s(vec_abs(line1C), vsum);
    vsum = vec_sum4s(vec_abs(line2C), vsum);
    vsum = vec_sum4s(vec_abs(line3C), vsum);
    vsum = vec_sum4s(vec_abs(line4C), vsum);
    vsum = vec_sum4s(vec_abs(line5C), vsum);
    vsum = vec_sum4s(vec_abs(line6C), vsum);
    vsum = vec_sum4s(vec_abs(line7C), vsum);

    line0S = vec_add(temp0S, temp1S);
    line1S = vec_sub(temp0S, temp1S);
    line2S = vec_add(temp2S, temp3S);
    line3S = vec_sub(temp2S, temp3S);
    line4S = vec_add(temp4S, temp5S);
    line5S = vec_sub(temp4S, temp5S);
    line6S = vec_add(temp6S, temp7S);
    line7S = vec_sub(temp6S, temp7S);

    line0BS = vec_add(line0S, line2S);
    line2BS = vec_sub(line0S, line2S);
    line1BS = vec_add(line1S, line3S);
    line3BS = vec_sub(line1S, line3S);
    line4BS = vec_add(line4S, line6S);
    line6BS = vec_sub(line4S, line6S);
    line5BS = vec_add(line5S, line7S);
    line7BS = vec_sub(line5S, line7S);

    line0CS = vec_add(line0BS, line4BS);
    line4CS = vec_sub(line0BS, line4BS);
    line1CS = vec_add(line1BS, line5BS);
    line5CS = vec_sub(line1BS, line5BS);
    line2CS = vec_add(line2BS, line6BS);
    line6CS = vec_sub(line2BS, line6BS);
    line3CS = vec_add(line3BS, line7BS);
    line7CS = vec_sub(line3BS, line7BS);

    vsum = vec_sum4s(vec_abs(line0CS), vsum);
    vsum = vec_sum4s(vec_abs(line1CS), vsum);
    vsum = vec_sum4s(vec_abs(line2CS), vsum);
    vsum = vec_sum4s(vec_abs(line3CS), vsum);
    vsum = vec_sum4s(vec_abs(line4CS), vsum);
    vsum = vec_sum4s(vec_abs(line5CS), vsum);
    vsum = vec_sum4s(vec_abs(line6CS), vsum);
    vsum = vec_sum4s(vec_abs(line7CS), vsum);
    vsum = vec_sums(vsum, (vector signed int)vzero);
    vsum = vec_splat(vsum, 3);
    vec_ste(vsum, 0, &sum);
  }
  return sum;
}

int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
  int score;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
  score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
  if (h==16) {
    dst += 8*stride;
    src += 8*stride;
    score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
  }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
  return score;
}
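
/* Hedged wiring sketch: dsputil exposes these comparators through
   function pointers; the init code registers them roughly as follows
   (the hadamard8_diff[] member follows the usual dsputil pattern, and
   the 8x8 function name is assumed from its perf-counter label):

       c->hadamard8_diff[0] = hadamard8_diff16_altivec;
       c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
*/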

int has_altivec(void)
{
#ifdef __AMIGAOS4__
        ULONG result = 0;
        extern struct ExecIFace *IExec;

        IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
        if (result == VECTORTYPE_ALTIVEC) return 1;
        return 0;
#else /* __AMIGAOS4__ */

#ifdef CONFIG_DARWIN
    int sels[2] = {CTL_HW, HW_VECTORUNIT};
    int has_vu = 0;
    size_t len = sizeof(has_vu);
    int err;

    err = sysctl(sels, 2, &has_vu, &len, NULL, 0);

    if (err == 0) return (has_vu != 0);
#else /* CONFIG_DARWIN */
/* no Darwin, do it the brute-force way */
/* this is borrowed from the libmpeg2 library */
    {
      signal (SIGILL, sigill_handler);
      if (sigsetjmp (jmpbuf, 1)) {
        signal (SIGILL, SIG_DFL);
      } else {
        canjump = 1;

        /* mtspr 256 sets VRSAVE; the vand is an arbitrary AltiVec */
        /* instruction that raises SIGILL on CPUs without AltiVec  */
        asm volatile ("mtspr 256, %0\n\t"
                      "vand %%v0, %%v0, %%v0"
                      :
                      : "r" (-1));

        signal (SIGILL, SIG_DFL);
        return 1;
      }
    }
#endif /* CONFIG_DARWIN */
    return 0;
#endif /* __AMIGAOS4__ */
}
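
/* Hedged usage sketch: callers gate the AltiVec paths on this runtime
   check, along these lines (illustrative of the init pattern, not a
   copy of the real init code):

       if (has_altivec()) {
           c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
           ...
       }
*/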

static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
                                            int blocksize)
{
    int i;
    vector float m, a, s0, s1;
    vector bool int t0, t1;
    /* vec_splat_u32 only accepts a 5-bit immediate (-16..15), so the
       shift count 31 (the float sign bit) is built as 15+15+1 */
    const vector unsigned int v_31 =
        vec_add(vec_add(vec_splat_u32(15),vec_splat_u32(15)),vec_splat_u32(1));
    for(i=0; i<blocksize; i+=4) {
        m = vec_ld(0, mag+i);
        a = vec_ld(0, ang+i);
        t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
        t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
        a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
        s0 = vec_and(a, t1);  /* flipped angle where original angle <= 0 */
        s1 = vec_andc(a, t1); /* flipped angle where original angle > 0 */
        a = vec_sub(m, s1);   /* new angle */
        m = vec_add(m, s0);   /* new magnitude */
        vec_st(a, 0, ang+i);  /* store whole vectors; vec_ste would only
                                 write back a single element */
        vec_st(m, 0, mag+i);
    }
}
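
/* Hedged scalar sketch of the coupling the vector loop implements,
   four floats at a time (the canonical scalar version lives in the
   Vorbis decoder, not in this file; this rendering is illustrative): */
#if 0
static void vorbis_inverse_coupling_scalar(float *mag, float *ang,
                                           int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        float m = mag[i], a = ang[i];
        float af = (m <= 0.0f) ? -a : a;  /* t0: conditional sign flip */
        if (a <= 0.0f) {                  /* t1: tested on the original angle */
            ang[i] = m;                   /* s0 path */
            mag[i] = m + af;
        } else {
            ang[i] = m - af;              /* s1 path */
            mag[i] = m;
        }
    }
}
#endif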

/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE

    int j;
POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
 for (j = 0; j < 2; j++) {
   int             i;
   const uint32_t  a = (((const struct unaligned_32 *) (pixels))->l);
   const uint32_t  b = (((const struct unaligned_32 *) (pixels + 1))->l);
   uint32_t        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
   uint32_t        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
   uint32_t        l1, h1;
   pixels += line_size;
   for (i = 0; i < h; i += 2) {
     uint32_t        a = (((const struct unaligned_32 *) (pixels))->l);
     uint32_t        b = (((const struct unaligned_32 *) (pixels + 1))->l);
     l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
     h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
     *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
     pixels += line_size;
     block += line_size;
     a = (((const struct unaligned_32 *) (pixels))->l);
     b = (((const struct unaligned_32 *) (pixels + 1))->l);
     l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
     h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
     *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
     pixels += line_size;
     block += line_size;
   }
   pixels += 4 - line_size * (h + 1);
   block += 4 - line_size * h;
 }
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
   register int i;
   register vector unsigned char
     pixelsv1, pixelsv2,
     pixelsavg;
   register vector unsigned char
     blockv, temp1, temp2, blocktemp;
   register vector unsigned short
     pixelssum1, pixelssum2, temp3;
   register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
   register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);

   temp1 = vec_ld(0, pixels);
   temp2 = vec_ld(16, pixels);
   pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
   if ((((unsigned long)pixels) & 0x0000000F) ==  0x0000000F)
   {
     pixelsv2 = temp2;
   }
   else
   {
     pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
   }
   pixelsv1 = vec_mergeh(vczero, pixelsv1);
   pixelsv2 = vec_mergeh(vczero, pixelsv2);
   pixelssum1 = vec_add((vector unsigned short)pixelsv1,
                        (vector unsigned short)pixelsv2);
   pixelssum1 = vec_add(pixelssum1, vctwo);

POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
   for (i = 0; i < h ; i++) {
     int rightside = ((unsigned long)block & 0x0000000F);
     blockv = vec_ld(0, block);

     temp1 = vec_ld(line_size, pixels);
     temp2 = vec_ld(line_size + 16, pixels);
     pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
     if (((((unsigned long)pixels) + line_size) & 0x0000000F) ==  0x0000000F)
     {
       pixelsv2 = temp2;
     }
     else
     {
       pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
     }

     pixelsv1 = vec_mergeh(vczero, pixelsv1);
     pixelsv2 = vec_mergeh(vczero, pixelsv2);
     pixelssum2 = vec_add((vector unsigned short)pixelsv1,
                          (vector unsigned short)pixelsv2);
     temp3 = vec_add(pixelssum1, pixelssum2);
     temp3 = vec_sra(temp3, vctwo);
     pixelssum1 = vec_add(pixelssum2, vctwo);
     pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);

     if (rightside)
     {
       blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
     }
     else
     {
       blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
     }

     blockv = vec_avg(blocktemp, blockv);
     vec_st(blockv, 0, block);

     block += line_size;
     pixels += line_size;
   }

POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
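
/* Hedged note: both paths above compute, for every output pixel, the
   rounded half-pel interpolation
       t = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 2) >> 2   (ls = line_size)
   and then block[x] = rnd_avg(block[x], t); the scalar path uses the
   classic 2-bit/6-bit split to handle four pixels per uint32_t. */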