Commit b4bea3cc authored by Christophe Massiot

* Have the dvdread plug-in respond to dvd: URLs;

* The MacOS X interface should no longer take as much CPU as it used to;
* Fixed a bug in the Altivec-enabled motion compensation (thanks walken);
* Fixed a segfault in the Altivec-enabled memcpy.
parent 49a7d5c1
@@ -2,7 +2,7 @@
  * dvdread.c : DvdRead input module for vlc
  *****************************************************************************
  * Copyright (C) 2001 VideoLAN
- * $Id: dvdread.c,v 1.13 2002/03/06 16:39:37 stef Exp $
+ * $Id: dvdread.c,v 1.14 2002/04/16 23:00:54 massiot Exp $
  *
  * Authors: Samuel Hocevar <sam@zoy.org>
  *
@@ -47,6 +47,7 @@ MODULE_INIT_START
     ADD_CAPABILITY( DEMUX, 0 )
     ADD_CAPABILITY( ACCESS, 110 )
     ADD_SHORTCUT( "dvdread" )
+    ADD_SHORTCUT( "dvd" )
 MODULE_INIT_STOP
 
 MODULE_ACTIVATE_START
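With the extra ADD_SHORTCUT, the access/demux plug-in above is now also selected for MRLs whose access part is dvd (for instance "vlc dvd:/dev/dvd", assuming the usual device path), in addition to the existing dvdread shortcut. As a rough illustration only, here is a minimal, hypothetical sketch in plain C of that kind of shortcut matching; it is not VLC's actual module-bank code and every name in it is made up:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical illustration: an MRL of the form "shortcut:..." selects a
     * module that registered that shortcut.  This only sketches the idea
     * behind ADD_SHORTCUT( "dvd" ), nothing more. */
    static int mrl_matches_shortcut( const char *psz_mrl, const char *psz_shortcut )
    {
        size_t i_len = strlen( psz_shortcut );
        return !strncmp( psz_mrl, psz_shortcut, i_len ) && psz_mrl[i_len] == ':';
    }

    int main( void )
    {
        const char *pp_shortcuts[] = { "dvdread", "dvd" };
        const char *psz_mrl = "dvd:/dev/dvd";

        for( size_t i = 0; i < sizeof(pp_shortcuts) / sizeof(*pp_shortcuts); i++ )
        {
            if( mrl_matches_shortcut( psz_mrl, pp_shortcuts[i] ) )
            {
                printf( "selected plug-in via shortcut \"%s\"\n", pp_shortcuts[i] );
            }
        }
        return 0;
    }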
@@ -2,7 +2,7 @@
  * intf_controller.c: MacOS X plugin for vlc
  *****************************************************************************
  * Copyright (C) 2001 VideoLAN
- * $Id: intf_controller.c,v 1.4 2002/03/19 03:33:52 jlj Exp $
+ * $Id: intf_controller.c,v 1.5 2002/04/16 23:00:54 massiot Exp $
  *
  * Authors: Florian G. Pflug <fgp@phlo.org>
  *          Jon Lech Johansen <jon-vl@nanocrew.net>
@@ -91,7 +91,7 @@
             UpdateSystemActivity( UsrActivity );
         }
-        sleepDate = [NSDate dateWithTimeIntervalSinceNow: 0.1];
+        sleepDate = [NSDate dateWithTimeIntervalSinceNow: 0.5];
         [NSThread sleepUntilDate: sleepDate];
     }
@@ -2,10 +2,10 @@
  * intf_macosx.c: MacOS X interface plugin
  *****************************************************************************
  * Copyright (C) 2001 VideoLAN
- * $Id: intf_macosx.c,v 1.11 2002/02/18 01:34:44 jlj Exp $
+ * $Id: intf_macosx.c,v 1.12 2002/04/16 23:00:54 massiot Exp $
  *
  * Authors: Colin Delacroix <colin@zoy.org>
  *          Florian G. Pflug <fgp@phlo.org>
  *          Jon Lech Johansen <jon-vl@nanocrew.net>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -2,7 +2,7 @@
  * memcpyaltivec.c : Altivec memcpy module
  *****************************************************************************
  * Copyright (C) 2001 VideoLAN
- * $Id: memcpyaltivec.c,v 1.3 2002/04/07 23:08:44 massiot Exp $
+ * $Id: memcpyaltivec.c,v 1.4 2002/04/16 23:00:54 massiot Exp $
  *
  * Authors: Christophe Massiot <massiot@via.ecp.fr>
  *
@@ -122,7 +122,7 @@ void * _M( fast_memcpy )(void * _to, const void * _from, size_t len)
         from += 16;
         len -= 16;
         tmp = vec_perm( ref0, ref1, perm );
-        do
+        while( len & ~(MMREG_SIZE-1) )
         {
             ref0 = vec_ld( 0, from );
             ref1 = vec_ld( 15, from );
@@ -131,7 +131,7 @@ void * _M( fast_memcpy )(void * _to, const void * _from, size_t len)
             vec_st( tmp, 0, to );
             tmp = vec_perm( ref0, ref1, perm );
             to += 16;
-        } while( len & ~(MMREG_SIZE-1) );
+        }
         vec_st( tmp, 0, to );
         to += 16;
     }
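The two hunks above are the memcpy segfault fix mentioned in the commit message: the old do { ... } while( len & ~(MMREG_SIZE-1) ); executed the 16-byte vector copy at least once even when fewer than 16 bytes were left at that point, so it could read and write past the end of the buffers (and len then wrapped around). Turning it into a while loop tests the remaining length first; the inline-assembly variant of the same function, in the next hunk, receives the equivalent guard. A reduced scalar sketch of the two shapes, with memcpy standing in for the vec_ld/vec_perm/vec_st block (illustrative only, not the module's real code):

    #include <stddef.h>
    #include <string.h>

    #define MMREG_SIZE 16

    /* Old shape: the body runs at least once, so with len < 16 it still copies
     * a full 16-byte block past the end of both buffers, and len underflows,
     * which keeps the loop running until something faults. */
    static void copy_do_while( char *to, const char *from, size_t len )
    {
        do
        {
            memcpy( to, from, MMREG_SIZE );   /* stand-in for the vector block copy */
            to += MMREG_SIZE; from += MMREG_SIZE; len -= MMREG_SIZE;
        } while( len & ~(MMREG_SIZE-1) );
    }

    /* Fixed shape, as in the patch: test before copying, so a short tail skips
     * the 16-byte loop and is left to the trailing byte copy. */
    static void copy_while( char *to, const char *from, size_t len )
    {
        while( len & ~(MMREG_SIZE-1) )
        {
            memcpy( to, from, MMREG_SIZE );
            to += MMREG_SIZE; from += MMREG_SIZE; len -= MMREG_SIZE;
        }
        memcpy( to, from, len );              /* remaining 0..15 bytes */
    }

    int main( void )
    {
        char src[23] = "a short, 22-byte input", dst[23];
        copy_while( dst, src, sizeof(src) );  /* safe for any length */
        (void)dst; (void)copy_do_while;       /* buggy variant kept for contrast */
        return 0;
    }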
@@ -165,60 +165,62 @@ void * _M( fast_memcpy )(void * _to, const void * _from, size_t len)
 void * _M( fast_memcpy )(void * _to, const void * _from, size_t len)
 {
     asm (" \n"
 "    cmplwi %cr0, %r5, 16\n"
 "    mr %r9, %r3\n"
 "    bc 4, 1, ._L3\n"
 "    andi. %r0, %r3, 15\n"
 "    bc 12, 2, ._L4\n"
 "    subfic %r0, %r0, 16\n"
 "    add %r11, %r3, %r0\n"
 "    cmplw %cr0, %r3, %r11\n"
 "    subf %r5, %r0, %r5\n"
 "    bc 4, 0, ._L4\n"
 "._L7:\n"
 "    lbz %r0, 0(%r4)\n"
 "    stb %r0, 0(%r9)\n"
 "    addi %r9, %r9, 1\n"
 "    cmplw %cr0, %r9, %r11\n"
 "    addi %r4, %r4, 1\n"
 "    bc 12, 0, ._L7\n"
 "._L4:\n"
 "    rlwinm. %r0, %r5, 0, 0, 27\n"
 "    bc 12, 2, ._L3\n"
-"    li %r11, 15\n"
-"    lvsl %v12, 0, %r4\n"
-"    lvx %v1, 0, %r4\n"
-"    lvx %v0, %r11, %r4\n"
-"    addi %r4, %r4, 16\n"
-"    vperm %v13, %v1, %v0, %v12\n"
-"    addi %r5, %r5, -16\n"
-"._L13:\n"
-"    addi %r5, %r5, -16\n"
-"    li %r11, 15\n"
-"    lvx %v1, 0, %r4\n"
-"    lvx %v0, %r11, %r4\n"
-"    rlwinm. %r0, %r5, 0, 0, 27\n"
-"    stvx %v13, 0, %r9\n"
-"    vperm %v13, %v1, %v0, %v12\n"
-"    addi %r4, %r4, 16\n"
-"    addi %r9, %r9, 16\n"
-"    bc 4, 2, ._L13\n"
-"    stvx %v13, 0, %r9\n"
-"    addi %r9, %r9, 16\n"
-"._L3:\n"
-"    cmpwi %cr0, %r5, 0\n"
-"    bclr 12, 2\n"
-"    add %r5, %r9, %r5\n"
-"    cmplw %cr0, %r9, %r5\n"
-"    bclr 4, 0\n"
-"._L17:\n"
-"    lbz %r0, 0(%r4)\n"
-"    stb %r0, 0(%r9)\n"
-"    addi %r9, %r9, 1\n"
-"    cmplw %cr0, %r9, %r5\n"
-"    addi %r4, %r4, 1\n"
-"    bc 12, 0, ._L17\n"
-"    blr\n"
+"    addi %r5, %r5, -16\n"
+"    li %r11, 15\n"
+"    lvsl %v12, 0, %r4\n"
+"    lvx %v1, 0, %r4\n"
+"    lvx %v0, %r11, %r4\n"
+"    rlwinm. %r0, %r5, 0, 0, 27\n"
+"    vperm %v13, %v1, %v0, %v12\n"
+"    addi %r4, %r4, 16\n"
+"    bc 12, 2, ._L11\n"
+"._L12:\n"
+"    addi %r5, %r5, -16\n"
+"    li %r11, 15\n"
+"    lvx %v1, 0, %r4\n"
+"    lvx %v0, %r11, %r4\n"
+"    rlwinm. %r0, %r5, 0, 0, 27\n"
+"    stvx %v13, 0, %r9\n"
+"    vperm %v13, %v1, %v0, %v12\n"
+"    addi %r4, %r4, 16\n"
+"    addi %r9, %r9, 16\n"
+"    bc 4, 2, ._L12\n"
+"._L11:\n"
+"    stvx %v13, 0, %r9\n"
+"    addi %r9, %r9, 16\n"
+"._L3:\n"
+"    cmpwi %cr0, %r5, 0\n"
+"    bclr 12, 2\n"
+"    add %r5, %r9, %r5\n"
+"    cmplw %cr0, %r9, %r5\n"
+"    bclr 4, 0\n"
+"._L17:\n"
+"    lbz %r0, 0(%r4)\n"
+"    stb %r0, 0(%r9)\n"
+"    addi %r9, %r9, 1\n"
+"    cmplw %cr0, %r9, %r5\n"
+"    addi %r4, %r4, 1\n"
+"    bc 12, 0, ._L17\n"
     );
 }
@@ -2,7 +2,7 @@
  * motionaltivec.c : Altivec motion compensation module for vlc
  *****************************************************************************
  * Copyright (C) 2001 VideoLAN
- * $Id: motionaltivec.c,v 1.10 2002/02/15 13:32:53 sam Exp $
+ * $Id: motionaltivec.c,v 1.11 2002/04/16 23:00:54 massiot Exp $
  *
  * Authors: Michel Lespinasse <walken@zoy.org>
  *          Paul Mackerras <paulus@linuxcare.com.au>
@@ -466,7 +466,7 @@ static void MC_put_xy_8_altivec (uint8_t * dest, uint8_t * ref,
 "    vspltisb %v3, 1\n"
 "    lvsl %v5, %r5, %r4\n"
 "    vmrghb %v4, %v4, %v4\n"
-"    li %r9, 16\n"
+"    li %r9, 8\n"
 "    vmrghb %v5, %v5, %v5\n"
 "    lvx %v1, 0, %r4\n"
 "    vpkuhum %v4, %v4, %v4\n"
@@ -495,8 +495,8 @@ static void MC_put_xy_8_altivec (uint8_t * dest, uint8_t * ref,
 "    vavgub %v1, %v9, %v7\n"
 "    vand %v0, %v0, %v13\n"
 "    vsububm %v13, %v1, %v0\n"
-"._L41:\n"
-"    li %r9, 16\n"
+"    ._L41:\n"
+"    li %r9, 8\n"
 "    lvx %v0, %r9, %r4\n"
 "    lvx %v1, 0, %r4\n"
 "    stvewx %v13, 0, %r3\n"
@@ -505,7 +505,7 @@ static void MC_put_xy_8_altivec (uint8_t * dest, uint8_t * ref,
 "    stvewx %v13, %r9, %r3\n"
 "    vperm %v11, %v1, %v0, %v4\n"
 "    add %r4, %r4, %r5\n"
-"    li %r9, 16\n"
+"    li %r9, 8\n"
 "    vavgub %v9, %v11, %v10\n"
 "    lvx %v0, %r9, %r4\n"
 "    vxor %v8, %v11, %v10\n"
@@ -533,7 +533,7 @@ static void MC_put_xy_8_altivec (uint8_t * dest, uint8_t * ref,
 "    add %r3, %r3, %r5\n"
 "    vsububm %v13, %v0, %v1\n"
 "    bdnz ._L41\n"
-"    li %r9, 16\n"
+"    li %r9, 8\n"
 "    lvx %v0, %r9, %r4\n"
 "    lvx %v1, 0, %r4\n"
 "    stvewx %v13, 0, %r3\n"
@@ -1000,7 +1000,7 @@ static void MC_avg_xy_8_altivec (uint8_t * dest, uint8_t * ref,
 "    vspltisb %v19, 1\n"
 "    lvsl %v3, %r5, %r4\n"
 "    vmrghb %v2, %v2, %v2\n"
-"    li %r9, 16\n"
+"    li %r9, 8\n"
 "    vmrghb %v3, %v3, %v3\n"
 "    lvx %v9, 0, %r4\n"
 "    vpkuhum %v2, %v2, %v2\n"
@@ -1031,8 +1031,8 @@ static void MC_avg_xy_8_altivec (uint8_t * dest, uint8_t * ref,
 "    vand %v1, %v1, %v13\n"
 "    vsububm %v0, %v0, %v1\n"
 "    vavgub %v13, %v4, %v0\n"
-"._L81:\n"
-"    li %r9, 16\n"
+"    ._L81:\n"
+"    li %r9, 8\n"
 "    lvx %v1, %r9, %r4\n"
 "    lvx %v9, 0, %r4\n"
 "    lvx %v4, %r5, %r3\n"
@@ -1043,7 +1043,7 @@ static void MC_avg_xy_8_altivec (uint8_t * dest, uint8_t * ref,
 "    stvewx %v13, %r9, %r3\n"
 "    vxor %v7, %v11, %v10\n"
 "    add %r4, %r4, %r5\n"
-"    li %r9, 16\n"
+"    li %r9, 8\n"
 "    vavgub %v8, %v11, %v10\n"
 "    lvx %v1, %r9, %r4\n"
 "    vor %v0, %v7, %v5\n"
@@ -1073,7 +1073,7 @@ static void MC_avg_xy_8_altivec (uint8_t * dest, uint8_t * ref,
 "    add %r3, %r3, %r5\n"
 "    vavgub %v13, %v4, %v0\n"
 "    bdnz ._L81\n"
-"    li %r9, 16\n"
+"    li %r9, 8\n"
 "    lvx %v1, %r9, %r4\n"
 "    lvx %v9, 0, %r4\n"
 "    lvx %v4, %r5, %r3\n"
@@ -1417,7 +1417,6 @@ void MC_put_xy_16_altivec (unsigned char * dest, unsigned char * ref,
              vec_and (vec_and (ones, vec_or (xor0, xor1)),
                       vec_xor (avg0, avg1)));
     do {
-
         ref0 = vec_ld (0, ref);
         ref1 = vec_ld (16, ref);
@@ -1477,7 +1476,7 @@ void MC_put_xy_8_altivec (unsigned char * dest, unsigned char * ref,
     height = (height >> 1) - 1;
     ref0 = vec_ld (0, ref);
-    ref1 = vec_ld (16, ref);
+    ref1 = vec_ld (8, ref);
     ref += stride;
     A = vec_perm (ref0, ref1, perm0A);
     B = vec_perm (ref0, ref1, perm0B);
@@ -1485,7 +1484,7 @@ void MC_put_xy_8_altivec (unsigned char * dest, unsigned char * ref,
     xor0 = vec_xor (A, B);
     ref0 = vec_ld (0, ref);
-    ref1 = vec_ld (16, ref);
+    ref1 = vec_ld (8, ref);
     ref += stride;
     A = vec_perm (ref0, ref1, perm1A);
     B = vec_perm (ref0, ref1, perm1B);
@@ -1495,10 +1494,9 @@ void MC_put_xy_8_altivec (unsigned char * dest, unsigned char * ref,
              vec_and (vec_and (ones, vec_or (xor0, xor1)),
                       vec_xor (avg0, avg1)));
     do {
-
         ref0 = vec_ld (0, ref);
-        ref1 = vec_ld (16, ref);
+        ref1 = vec_ld (8, ref);
         ref += stride;
         vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
         vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
@@ -1512,7 +1510,7 @@ void MC_put_xy_8_altivec (unsigned char * dest, unsigned char * ref,
                       vec_xor (avg0, avg1)));
         ref0 = vec_ld (0, ref);
-        ref1 = vec_ld (16, ref);
+        ref1 = vec_ld (8, ref);
         ref += stride;
         vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
         vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
@@ -1527,7 +1525,7 @@ void MC_put_xy_8_altivec (unsigned char * dest, unsigned char * ref,
     } while (--height);
     ref0 = vec_ld (0, ref);
-    ref1 = vec_ld (16, ref);
+    ref1 = vec_ld (8, ref);
     vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
     vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
     dest += stride;
@@ -1558,12 +1556,12 @@ void MC_put_xy_8_altivec (unsigned char * dest, unsigned char * ref,
     do {
         ref0 = vec_ld (0, ref);
-        ref1 = vec_ld (16, ref);
+        ref1 = vec_ld (8, ref);
         ref += stride;
         A = vec_perm (ref0, ref1, permA);
         B = vec_perm (ref0, ref1, permB);
         ref0 = vec_ld (0, ref);
-        ref1 = vec_ld (16, ref);
+        ref1 = vec_ld (8, ref);
         C = vec_perm (ref0, ref1, permA);
         D = vec_perm (ref0, ref1, permB);
@@ -1916,7 +1914,6 @@ void MC_avg_xy_16_altivec (unsigned char * dest, unsigned char * ref,
              vec_and (vec_and (ones, vec_or (xor0, xor1)),
                       vec_xor (avg0, avg1))));
     do {
-
         ref0 = vec_ld (0, ref);
         ref1 = vec_ld (16, ref);
@@ -1981,7 +1978,7 @@ void MC_avg_xy_8_altivec (unsigned char * dest, unsigned char * ref,
     height = (height >> 1) - 1;
     ref0 = vec_ld (0, ref);
-    ref1 = vec_ld (16, ref);
+    ref1 = vec_ld (8, ref);
     ref += stride;
     A = vec_perm (ref0, ref1, perm0A);
     B = vec_perm (ref0, ref1, perm0B);
@@ -1989,7 +1986,7 @@ void MC_avg_xy_8_altivec (unsigned char * dest, unsigned char * ref,
     xor0 = vec_xor (A, B);
     ref0 = vec_ld (0, ref);
-    ref1 = vec_ld (16, ref);
+    ref1 = vec_ld (8, ref);
     ref += stride;
     prev = vec_ld (0, dest);
     A = vec_perm (ref0, ref1, perm1A);
@@ -2000,10 +1997,9 @@ void MC_avg_xy_8_altivec (unsigned char * dest, unsigned char * ref,
              vec_and (vec_and (ones, vec_or (xor0, xor1)),
                       vec_xor (avg0, avg1))));
     do {
-
         ref0 = vec_ld (0, ref);
-        ref1 = vec_ld (16, ref);
+        ref1 = vec_ld (8, ref);
         ref += stride;
         prev = vec_ld (stride, dest);
         vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
@@ -2019,7 +2015,7 @@ void MC_avg_xy_8_altivec (unsigned char * dest, unsigned char * ref,
                       vec_xor (avg0, avg1))));
         ref0 = vec_ld (0, ref);
-        ref1 = vec_ld (16, ref);
+        ref1 = vec_ld (8, ref);
         ref += stride;
         prev = vec_ld (stride, dest);
         vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
@@ -2036,7 +2032,7 @@ void MC_avg_xy_8_altivec (unsigned char * dest, unsigned char * ref,
     } while (--height);
     ref0 = vec_ld (0, ref);
-    ref1 = vec_ld (16, ref);
+    ref1 = vec_ld (8, ref);
     prev = vec_ld (stride, dest);
     vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
     vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
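All of the motionaltivec.c hunks make the same correction to the 8-pixel-wide prediction paths (the Altivec MC fix for which the commit message thanks walken): a half-pel 8-wide prediction only reads bytes ref[0] through ref[8] of a row, so the second aligned load that feeds the unaligned vec_perm only has to cover ref + 8. Loading at offset 16, as the old code did, could fetch an aligned 16-byte block lying entirely beyond the bytes that are needed, which can fault when the reference block sits at the very end of the picture buffer. The li %r9, 16 / li %r9, 8 changes in the assembly versions are the same fix, since %r9 is the index used by the second lvx. Below is a plain-C sketch (no Altivec; the buffer, offsets and names are illustrative only) of which aligned block the second load touches for every possible source alignment:

    #include <stdint.h>
    #include <stdio.h>

    /* Base address of the aligned 16-byte block that vec_ld( offset, p )
     * (or lvx with that offset in an index register) would fetch. */
    static uintptr_t vec_ld_block( const uint8_t *p, int offset )
    {
        return ( (uintptr_t)p + (uintptr_t)offset ) & ~(uintptr_t)15;
    }

    int main( void )
    {
        static uint8_t frame[4096];

        /* A 16-byte aligned spot inside the buffer, so that "align" below is
         * the real alignment of ref. */
        const uint8_t *base =
            (const uint8_t *)( ( (uintptr_t)frame + 15 ) & ~(uintptr_t)15 ) + 64;

        /* Try every alignment of an 8-pixel-wide reference block and check
         * whether the second load lands wholly past the bytes that are used. */
        for( int align = 0; align < 16; align++ )
        {
            const uint8_t *ref = base + align;
            uintptr_t last_needed = (uintptr_t)ref + 8;   /* ref[0..8] is all we read */

            uintptr_t blk8  = vec_ld_block( ref, 8 );     /* new code */
            uintptr_t blk16 = vec_ld_block( ref, 16 );    /* old code */

            printf( "align %2d: offset 8 -> %s, offset 16 -> %s\n", align,
                    blk8  > last_needed ? "past the needed bytes" : "needed",
                    blk16 > last_needed ? "past the needed bytes" : "needed" );
        }
        return 0;
    }

For alignments 0 through 7 the offset-16 load fetches a block the code never uses, while the offset-8 load always stays within the block that holds the last byte actually read.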