Commit c5165668 authored by flameeyes's avatar flameeyes

Convert asm keyword into __asm__.

Neither the asm() nor the __asm__() keyword is part of the C99
standard, but while GCC accepts the former in C89 syntax, it is not
accepted in C99 unless GNU extensions are turned on (with -fasm). The
latter form is accepted in any syntax as an extension (without
requiring further command-line options).

Sun Studio C99 compiler also does not accept asm() while accepting
__asm__(), albeit reporting warnings that it's not valid C99 syntax.



git-svn-id: file:///var/local/repositories/mplayer/trunk/libswscale@27778 b3059339-0415-0410-9bf9-f77b7e298cf2
parent dd7d7599
...@@ -81,12 +81,12 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s ...@@ -81,12 +81,12 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s
#endif #endif
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 23; mm_end = end - 23;
asm volatile("movq %0, %%mm7"::"m"(mask32):"memory"); __asm__ volatile("movq %0, %%mm7"::"m"(mask32):"memory");
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"punpckldq 3%1, %%mm0 \n\t" "punpckldq 3%1, %%mm0 \n\t"
...@@ -110,8 +110,8 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s ...@@ -110,8 +110,8 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s
dest += 32; dest += 32;
s += 24; s += 24;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -141,11 +141,11 @@ static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -141,11 +141,11 @@ static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long s
#endif #endif
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 31; mm_end = end - 31;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq 8%1, %%mm1 \n\t" "movq 8%1, %%mm1 \n\t"
...@@ -196,8 +196,8 @@ static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -196,8 +196,8 @@ static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long s
dest += 24; dest += 24;
s += 32; s += 32;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -231,12 +231,12 @@ static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -231,12 +231,12 @@ static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_
const uint8_t *mm_end; const uint8_t *mm_end;
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*s)); __asm__ volatile(PREFETCH" %0"::"m"(*s));
asm volatile("movq %0, %%mm4"::"m"(mask15s)); __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
mm_end = end - 15; mm_end = end - 15;
while (s<mm_end) while (s<mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq 8%1, %%mm2 \n\t" "movq 8%1, %%mm2 \n\t"
...@@ -254,8 +254,8 @@ static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -254,8 +254,8 @@ static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_
d+=16; d+=16;
s+=16; s+=16;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
mm_end = end - 3; mm_end = end - 3;
while (s < mm_end) while (s < mm_end)
...@@ -280,13 +280,13 @@ static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -280,13 +280,13 @@ static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_
const uint8_t *mm_end; const uint8_t *mm_end;
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*s)); __asm__ volatile(PREFETCH" %0"::"m"(*s));
asm volatile("movq %0, %%mm7"::"m"(mask15rg)); __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
asm volatile("movq %0, %%mm6"::"m"(mask15b)); __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
mm_end = end - 15; mm_end = end - 15;
while (s<mm_end) while (s<mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq 8%1, %%mm2 \n\t" "movq 8%1, %%mm2 \n\t"
...@@ -308,8 +308,8 @@ static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -308,8 +308,8 @@ static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_
d+=16; d+=16;
s+=16; s+=16;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
mm_end = end - 3; mm_end = end - 3;
while (s < mm_end) while (s < mm_end)
...@@ -340,7 +340,7 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -340,7 +340,7 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_
#ifdef HAVE_MMX #ifdef HAVE_MMX
mm_end = end - 15; mm_end = end - 15;
#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
asm volatile( __asm__ volatile(
"movq %3, %%mm5 \n\t" "movq %3, %%mm5 \n\t"
"movq %4, %%mm6 \n\t" "movq %4, %%mm6 \n\t"
"movq %5, %%mm7 \n\t" "movq %5, %%mm7 \n\t"
...@@ -375,14 +375,14 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -375,14 +375,14 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_
: "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
); );
#else #else
asm volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
asm volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t" "movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask)); ::"m"(red_16mask),"m"(green_16mask));
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t" "movd 4%1, %%mm3 \n\t"
...@@ -416,8 +416,8 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -416,8 +416,8 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_
s += 16; s += 16;
} }
#endif #endif
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -436,15 +436,15 @@ static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long s ...@@ -436,15 +436,15 @@ static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long s
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
asm volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t" "movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask)); ::"m"(red_16mask),"m"(green_16mask));
mm_end = end - 15; mm_end = end - 15;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t" "movd 4%1, %%mm3 \n\t"
...@@ -477,8 +477,8 @@ static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long s ...@@ -477,8 +477,8 @@ static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long s
d += 4; d += 4;
s += 16; s += 16;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -499,7 +499,7 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -499,7 +499,7 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_
#ifdef HAVE_MMX #ifdef HAVE_MMX
mm_end = end - 15; mm_end = end - 15;
#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
asm volatile( __asm__ volatile(
"movq %3, %%mm5 \n\t" "movq %3, %%mm5 \n\t"
"movq %4, %%mm6 \n\t" "movq %4, %%mm6 \n\t"
"movq %5, %%mm7 \n\t" "movq %5, %%mm7 \n\t"
...@@ -534,14 +534,14 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -534,14 +534,14 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_
: "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
); );
#else #else
asm volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
asm volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t" "movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask)); ::"m"(red_15mask),"m"(green_15mask));
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t" "movd 4%1, %%mm3 \n\t"
...@@ -575,8 +575,8 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -575,8 +575,8 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_
s += 16; s += 16;
} }
#endif #endif
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -595,15 +595,15 @@ static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long s ...@@ -595,15 +595,15 @@ static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long s
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
asm volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t" "movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask)); ::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 15; mm_end = end - 15;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t" "movd 4%1, %%mm3 \n\t"
...@@ -636,8 +636,8 @@ static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long s ...@@ -636,8 +636,8 @@ static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long s
d += 4; d += 4;
s += 16; s += 16;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -656,15 +656,15 @@ static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long s ...@@ -656,15 +656,15 @@ static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long s
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
asm volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t" "movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask)); ::"m"(red_16mask),"m"(green_16mask));
mm_end = end - 11; mm_end = end - 11;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t" "movd 3%1, %%mm3 \n\t"
...@@ -697,8 +697,8 @@ static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long s ...@@ -697,8 +697,8 @@ static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long s
d += 4; d += 4;
s += 12; s += 12;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -719,15 +719,15 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -719,15 +719,15 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
asm volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t" "movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask)); ::"m"(red_16mask),"m"(green_16mask));
mm_end = end - 15; mm_end = end - 15;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t" "movd 3%1, %%mm3 \n\t"
...@@ -760,8 +760,8 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -760,8 +760,8 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_
d += 4; d += 4;
s += 12; s += 12;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -782,15 +782,15 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s ...@@ -782,15 +782,15 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
asm volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t" "movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask)); ::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 11; mm_end = end - 11;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t" "movd 3%1, %%mm3 \n\t"
...@@ -823,8 +823,8 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s ...@@ -823,8 +823,8 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s
d += 4; d += 4;
s += 12; s += 12;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -845,15 +845,15 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -845,15 +845,15 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_
uint16_t *d = (uint16_t *)dst; uint16_t *d = (uint16_t *)dst;
end = s + src_size; end = s + src_size;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
asm volatile( __asm__ volatile(
"movq %0, %%mm7 \n\t" "movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t" "movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask)); ::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 15; mm_end = end - 15;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t" "movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t" "movd 3%1, %%mm3 \n\t"
...@@ -886,8 +886,8 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -886,8 +886,8 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_
d += 4; d += 4;
s += 12; s += 12;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -929,11 +929,11 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -929,11 +929,11 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s
const uint16_t *s = (const uint16_t*)src; const uint16_t *s = (const uint16_t*)src;
end = s + src_size/2; end = s + src_size/2;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 7; mm_end = end - 7;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq %1, %%mm1 \n\t" "movq %1, %%mm1 \n\t"
...@@ -996,7 +996,7 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -996,7 +996,7 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s
:"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null) :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
:"memory"); :"memory");
/* borrowed 32 to 24 */ /* borrowed 32 to 24 */
asm volatile( __asm__ volatile(
"movq %%mm0, %%mm4 \n\t" "movq %%mm0, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t" "movq %%mm3, %%mm5 \n\t"
"movq %%mm6, %%mm0 \n\t" "movq %%mm6, %%mm0 \n\t"
...@@ -1048,8 +1048,8 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -1048,8 +1048,8 @@ static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long s
d += 24; d += 24;
s += 8; s += 8;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -1071,11 +1071,11 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -1071,11 +1071,11 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s
const uint16_t *s = (const uint16_t *)src; const uint16_t *s = (const uint16_t *)src;
end = s + src_size/2; end = s + src_size/2;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
mm_end = end - 7; mm_end = end - 7;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq %1, %%mm1 \n\t" "movq %1, %%mm1 \n\t"
...@@ -1137,7 +1137,7 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -1137,7 +1137,7 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s
:"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null) :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
:"memory"); :"memory");
/* borrowed 32 to 24 */ /* borrowed 32 to 24 */
asm volatile( __asm__ volatile(
"movq %%mm0, %%mm4 \n\t" "movq %%mm0, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t" "movq %%mm3, %%mm5 \n\t"
"movq %%mm6, %%mm0 \n\t" "movq %%mm6, %%mm0 \n\t"
...@@ -1189,8 +1189,8 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -1189,8 +1189,8 @@ static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long s
d += 24; d += 24;
s += 8; s += 8;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -1212,12 +1212,12 @@ static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -1212,12 +1212,12 @@ static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_
const uint16_t *s = (const uint16_t *)src; const uint16_t *s = (const uint16_t *)src;
end = s + src_size/2; end = s + src_size/2;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
asm volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
mm_end = end - 3; mm_end = end - 3;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq %1, %%mm1 \n\t" "movq %1, %%mm1 \n\t"
...@@ -1253,8 +1253,8 @@ static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -1253,8 +1253,8 @@ static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_
d += 16; d += 16;
s += 4; s += 4;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -1290,12 +1290,12 @@ static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -1290,12 +1290,12 @@ static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_
const uint16_t *s = (const uint16_t*)src; const uint16_t *s = (const uint16_t*)src;
end = s + src_size/2; end = s + src_size/2;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
asm volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
mm_end = end - 3; mm_end = end - 3;
while (s < mm_end) while (s < mm_end)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq %1, %%mm1 \n\t" "movq %1, %%mm1 \n\t"
...@@ -1331,8 +1331,8 @@ static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_ ...@@ -1331,8 +1331,8 @@ static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_
d += 16; d += 16;
s += 4; s += 4;
} }
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
while (s < end) while (s < end)
{ {
...@@ -1358,7 +1358,7 @@ static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long s ...@@ -1358,7 +1358,7 @@ static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long s
const uint8_t *s = src-idx; const uint8_t *s = src-idx;
uint8_t *d = dst-idx; uint8_t *d = dst-idx;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( __asm__ volatile(
"test %0, %0 \n\t" "test %0, %0 \n\t"
"jns 2f \n\t" "jns 2f \n\t"
PREFETCH" (%1, %0) \n\t" PREFETCH" (%1, %0) \n\t"
...@@ -1421,7 +1421,7 @@ static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -1421,7 +1421,7 @@ static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long s
unsigned i; unsigned i;
#ifdef HAVE_MMX #ifdef HAVE_MMX
long mmx_size= 23 - src_size; long mmx_size= 23 - src_size;
asm volatile ( __asm__ volatile (
"test %%"REG_a", %%"REG_a" \n\t" "test %%"REG_a", %%"REG_a" \n\t"
"jns 2f \n\t" "jns 2f \n\t"
"movq "MANGLE(mask24r)", %%mm5 \n\t" "movq "MANGLE(mask24r)", %%mm5 \n\t"
...@@ -1465,8 +1465,8 @@ static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long s ...@@ -1465,8 +1465,8 @@ static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long s
: "r" (src-mmx_size), "r"(dst-mmx_size) : "r" (src-mmx_size), "r"(dst-mmx_size)
); );
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
if (mmx_size==23) return; //finished, was multiple of 8 if (mmx_size==23) return; //finished, was multiple of 8
...@@ -1496,7 +1496,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u ...@@ -1496,7 +1496,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
asm volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
ASMALIGN(4) ASMALIGN(4)
"1: \n\t" "1: \n\t"
...@@ -1537,10 +1537,10 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u ...@@ -1537,10 +1537,10 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
y2 = yc2[n]; \ y2 = yc2[n]; \
u = uc[n]; \ u = uc[n]; \
v = vc[n]; \ v = vc[n]; \
asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \ __asm__("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \
asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \ __asm__("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \
asm("unpkbl %1, %0" : "=r"(u) : "r"(u)); \ __asm__("unpkbl %1, %0" : "=r"(u) : "r"(u)); \
asm("unpkbl %1, %0" : "=r"(v) : "r"(v)); \ __asm__("unpkbl %1, %0" : "=r"(v) : "r"(v)); \
yuv1 = (u << 8) + (v << 24); \ yuv1 = (u << 8) + (v << 24); \
yuv2 = yuv1 + y2; \ yuv2 = yuv1 + y2; \
yuv1 += y1; \ yuv1 += y1; \
...@@ -1557,10 +1557,10 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u ...@@ -1557,10 +1557,10 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
uint64_t y1, y2, yuv1, yuv2; uint64_t y1, y2, yuv1, yuv2;
uint64_t u, v; uint64_t u, v;
/* Prefetch */ /* Prefetch */
asm("ldq $31,64(%0)" :: "r"(yc)); __asm__("ldq $31,64(%0)" :: "r"(yc));
asm("ldq $31,64(%0)" :: "r"(yc2)); __asm__("ldq $31,64(%0)" :: "r"(yc2));
asm("ldq $31,64(%0)" :: "r"(uc)); __asm__("ldq $31,64(%0)" :: "r"(uc));
asm("ldq $31,64(%0)" :: "r"(vc)); __asm__("ldq $31,64(%0)" :: "r"(vc));
pl2yuy2(0); pl2yuy2(0);
pl2yuy2(1); pl2yuy2(1);
...@@ -1620,7 +1620,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u ...@@ -1620,7 +1620,7 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
dst += dstStride; dst += dstStride;
} }
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm( EMMS" \n\t" __asm__( EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
#endif #endif
...@@ -1648,7 +1648,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u ...@@ -1648,7 +1648,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
asm volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
ASMALIGN(4) ASMALIGN(4)
"1: \n\t" "1: \n\t"
...@@ -1726,7 +1726,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u ...@@ -1726,7 +1726,7 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u
dst += dstStride; dst += dstStride;
} }
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm( EMMS" \n\t" __asm__( EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
#endif #endif
...@@ -1777,7 +1777,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -1777,7 +1777,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
for (y=0; y<height; y+=2) for (y=0; y<height; y+=2)
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t" "pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00... "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
...@@ -1832,7 +1832,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -1832,7 +1832,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
ydst += lumStride; ydst += lumStride;
src += srcStride; src += srcStride;
asm volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
ASMALIGN(4) ASMALIGN(4)
"1: \n\t" "1: \n\t"
...@@ -1882,7 +1882,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -1882,7 +1882,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
src += srcStride; src += srcStride;
} }
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( EMMS" \n\t" __asm__ volatile( EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
#endif #endif
...@@ -1916,7 +1916,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWi ...@@ -1916,7 +1916,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWi
for (y=1; y<srcHeight; y++){ for (y=1; y<srcHeight; y++){
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW) #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
const long mmxSize= srcWidth&~15; const long mmxSize= srcWidth&~15;
asm volatile( __asm__ volatile(
"mov %4, %%"REG_a" \n\t" "mov %4, %%"REG_a" \n\t"
"1: \n\t" "1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t" "movq (%0, %%"REG_a"), %%mm0 \n\t"
...@@ -1994,7 +1994,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWi ...@@ -1994,7 +1994,7 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWi
#endif #endif
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( EMMS" \n\t" __asm__ volatile( EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
#endif #endif
...@@ -2015,7 +2015,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -2015,7 +2015,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
for (y=0; y<height; y+=2) for (y=0; y<height; y+=2)
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( __asm__ volatile(
"xorl %%eax, %%eax \n\t" "xorl %%eax, %%eax \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t" "pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00... "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
...@@ -2070,7 +2070,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -2070,7 +2070,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
ydst += lumStride; ydst += lumStride;
src += srcStride; src += srcStride;
asm volatile( __asm__ volatile(
"xorl %%eax, %%eax \n\t" "xorl %%eax, %%eax \n\t"
ASMALIGN(4) ASMALIGN(4)
"1: \n\t" "1: \n\t"
...@@ -2120,7 +2120,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t ...@@ -2120,7 +2120,7 @@ static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
src += srcStride; src += srcStride;
} }
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( EMMS" \n\t" __asm__ volatile( EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
#endif #endif
...@@ -2145,7 +2145,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_ ...@@ -2145,7 +2145,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
long i; long i;
for (i=0; i<2; i++) for (i=0; i<2; i++)
{ {
asm volatile( __asm__ volatile(
"mov %2, %%"REG_a" \n\t" "mov %2, %%"REG_a" \n\t"
"movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t" "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
"movq "MANGLE(ff_w1111)", %%mm5 \n\t" "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
...@@ -2218,7 +2218,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_ ...@@ -2218,7 +2218,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
src += srcStride; src += srcStride;
} }
src -= srcStride*2; src -= srcStride*2;
asm volatile( __asm__ volatile(
"mov %4, %%"REG_a" \n\t" "mov %4, %%"REG_a" \n\t"
"movq "MANGLE(ff_w1111)", %%mm5 \n\t" "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
"movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t" "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
...@@ -2372,7 +2372,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_ ...@@ -2372,7 +2372,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
src += srcStride*2; src += srcStride*2;
} }
asm volatile( EMMS" \n\t" __asm__ volatile( EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
:::"memory"); :::"memory");
#else #else
...@@ -2440,7 +2440,7 @@ static void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest, ...@@ -2440,7 +2440,7 @@ static void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
#ifdef HAVE_MMX #ifdef HAVE_MMX
#ifdef HAVE_SSE2 #ifdef HAVE_SSE2
asm( __asm__(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t" "1: \n\t"
PREFETCH" 64(%1, %%"REG_a") \n\t" PREFETCH" 64(%1, %%"REG_a") \n\t"
...@@ -2459,7 +2459,7 @@ static void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest, ...@@ -2459,7 +2459,7 @@ static void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
: "memory", "%"REG_a"" : "memory", "%"REG_a""
); );
#else #else
asm( __asm__(
"xor %%"REG_a", %%"REG_a" \n\t" "xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t" "1: \n\t"
PREFETCH" 64(%1, %%"REG_a") \n\t" PREFETCH" 64(%1, %%"REG_a") \n\t"
...@@ -2502,7 +2502,7 @@ static void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest, ...@@ -2502,7 +2502,7 @@ static void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
src2 += src2Stride; src2 += src2Stride;
} }
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
::: "memory" ::: "memory"
...@@ -2519,7 +2519,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, ...@@ -2519,7 +2519,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
long y,x,w,h; long y,x,w,h;
w=width/2; h=height/2; w=width/2; h=height/2;
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( __asm__ volatile(
PREFETCH" %0 \n\t" PREFETCH" %0 \n\t"
PREFETCH" %1 \n\t" PREFETCH" %1 \n\t"
::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory"); ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
...@@ -2531,7 +2531,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, ...@@ -2531,7 +2531,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
#ifdef HAVE_MMX #ifdef HAVE_MMX
for (;x<w-31;x+=32) for (;x<w-31;x+=32)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq 8%1, %%mm2 \n\t" "movq 8%1, %%mm2 \n\t"
...@@ -2571,7 +2571,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, ...@@ -2571,7 +2571,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
#ifdef HAVE_MMX #ifdef HAVE_MMX
for (;x<w-31;x+=32) for (;x<w-31;x+=32)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32%1 \n\t" PREFETCH" 32%1 \n\t"
"movq %1, %%mm0 \n\t" "movq %1, %%mm0 \n\t"
"movq 8%1, %%mm2 \n\t" "movq 8%1, %%mm2 \n\t"
...@@ -2605,7 +2605,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, ...@@ -2605,7 +2605,7 @@ static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x]; for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
} }
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
::: "memory" ::: "memory"
...@@ -2630,7 +2630,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2 ...@@ -2630,7 +2630,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2
#ifdef HAVE_MMX #ifdef HAVE_MMX
for (;x<w-7;x+=8) for (;x<w-7;x+=8)
{ {
asm volatile( __asm__ volatile(
PREFETCH" 32(%1, %0) \n\t" PREFETCH" 32(%1, %0) \n\t"
PREFETCH" 32(%2, %0) \n\t" PREFETCH" 32(%2, %0) \n\t"
PREFETCH" 32(%3, %0) \n\t" PREFETCH" 32(%3, %0) \n\t"
...@@ -2696,7 +2696,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2 ...@@ -2696,7 +2696,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2
} }
} }
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm( __asm__(
EMMS" \n\t" EMMS" \n\t"
SFENCE" \n\t" SFENCE" \n\t"
::: "memory" ::: "memory"
......
...@@ -120,7 +120,7 @@ static int doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcFormat ...@@ -120,7 +120,7 @@ static int doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcFormat
sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride); sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
#if defined(ARCH_X86) #if defined(ARCH_X86)
asm volatile ("emms\n\t"); __asm__ volatile ("emms\n\t");
#endif #endif
ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h); ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
...@@ -215,7 +215,7 @@ int main(int argc, char **argv){ ...@@ -215,7 +215,7 @@ int main(int argc, char **argv){
sws_scale(sws, rgb_src, rgb_stride, 0, H, src, stride); sws_scale(sws, rgb_src, rgb_stride, 0, H, src, stride);
#if defined(ARCH_X86) #if defined(ARCH_X86)
asm volatile ("emms\n\t"); __asm__ volatile ("emms\n\t");
#endif #endif
selfTest(src, stride, W, H); selfTest(src, stride, W, H);
......
...@@ -1061,7 +1061,7 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF ...@@ -1061,7 +1061,7 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
int ret= -1; int ret= -1;
#if defined(ARCH_X86) #if defined(ARCH_X86)
if (flags & SWS_CPU_CAPS_MMX) if (flags & SWS_CPU_CAPS_MMX)
asm volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions) __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
#endif #endif
// Note the +1 is for the MMXscaler which reads over the end // Note the +1 is for the MMXscaler which reads over the end
...@@ -1450,7 +1450,7 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil ...@@ -1450,7 +1450,7 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
//code fragment //code fragment
asm volatile( __asm__ volatile(
"jmp 9f \n\t" "jmp 9f \n\t"
// Begin // Begin
"0: \n\t" "0: \n\t"
...@@ -1490,7 +1490,7 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil ...@@ -1490,7 +1490,7 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
"=r" (fragmentLengthA) "=r" (fragmentLengthA)
); );
asm volatile( __asm__ volatile(
"jmp 9f \n\t" "jmp 9f \n\t"
// Begin // Begin
"0: \n\t" "0: \n\t"
...@@ -2167,7 +2167,7 @@ SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, int d ...@@ -2167,7 +2167,7 @@ SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, int d
SwsFilter dummyFilter= {NULL, NULL, NULL, NULL}; SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
#if defined(ARCH_X86) #if defined(ARCH_X86)
if (flags & SWS_CPU_CAPS_MMX) if (flags & SWS_CPU_CAPS_MMX)
asm volatile("emms\n\t"::: "memory"); __asm__ volatile("emms\n\t"::: "memory");
#endif #endif
#if !defined(RUNTIME_CPUDETECT) || !defined (CONFIG_GPL) //ensure that the flags match the compiled variant if cpudetect is off #if !defined(RUNTIME_CPUDETECT) || !defined (CONFIG_GPL) //ensure that the flags match the compiled variant if cpudetect is off
......
...@@ -71,7 +71,7 @@ ...@@ -71,7 +71,7 @@
#endif #endif
#define YSCALEYUV2YV12X(x, offset, dest, width) \ #define YSCALEYUV2YV12X(x, offset, dest, width) \
asm volatile(\ __asm__ volatile(\
"xor %%"REG_a", %%"REG_a" \n\t"\ "xor %%"REG_a", %%"REG_a" \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\ "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
"movq %%mm3, %%mm4 \n\t"\ "movq %%mm3, %%mm4 \n\t"\
...@@ -107,7 +107,7 @@ ...@@ -107,7 +107,7 @@
); );
#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \ #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
asm volatile(\ __asm__ volatile(\
"lea " offset "(%0), %%"REG_d" \n\t"\ "lea " offset "(%0), %%"REG_d" \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\ "xor %%"REG_a", %%"REG_a" \n\t"\
"pxor %%mm4, %%mm4 \n\t"\ "pxor %%mm4, %%mm4 \n\t"\
...@@ -207,7 +207,7 @@ ...@@ -207,7 +207,7 @@
: "%eax", "%ebx", "%ecx", "%edx", "%esi" : "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/ */
#define YSCALEYUV2PACKEDX \ #define YSCALEYUV2PACKEDX \
asm volatile(\ __asm__ volatile(\
"xor %%"REG_a", %%"REG_a" \n\t"\ "xor %%"REG_a", %%"REG_a" \n\t"\
ASMALIGN(4)\ ASMALIGN(4)\
"nop \n\t"\ "nop \n\t"\
...@@ -256,7 +256,7 @@ ...@@ -256,7 +256,7 @@
); );
#define YSCALEYUV2PACKEDX_ACCURATE \ #define YSCALEYUV2PACKEDX_ACCURATE \
asm volatile(\ __asm__ volatile(\
"xor %%"REG_a", %%"REG_a" \n\t"\ "xor %%"REG_a", %%"REG_a" \n\t"\
ASMALIGN(4)\ ASMALIGN(4)\
"nop \n\t"\ "nop \n\t"\
...@@ -1002,7 +1002,7 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chr ...@@ -1002,7 +1002,7 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chr
if (c->flags & SWS_ACCURATE_RND){ if (c->flags & SWS_ACCURATE_RND){
while(p--){ while(p--){
asm volatile( __asm__ volatile(
YSCALEYUV2YV121_ACCURATE YSCALEYUV2YV121_ACCURATE
:: "r" (src[p]), "r" (dst[p] + counter[p]), :: "r" (src[p]), "r" (dst[p] + counter[p]),
"g" (-counter[p]) "g" (-counter[p])
...@@ -1011,7 +1011,7 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chr ...@@ -1011,7 +1011,7 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chr
} }
}else{ }else{
while(p--){ while(p--){
asm volatile( __asm__ volatile(
YSCALEYUV2YV121 YSCALEYUV2YV121
:: "r" (src[p]), "r" (dst[p] + counter[p]), :: "r" (src[p]), "r" (dst[p] + counter[p]),
"g" (-counter[p]) "g" (-counter[p])
...@@ -1220,7 +1220,7 @@ static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1220,7 +1220,7 @@ static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
case PIX_FMT_RGB32: case PIX_FMT_RGB32:
asm volatile( __asm__ volatile(
FULL_YSCALEYUV2RGB FULL_YSCALEYUV2RGB
...@@ -1244,7 +1244,7 @@ FULL_YSCALEYUV2RGB ...@@ -1244,7 +1244,7 @@ FULL_YSCALEYUV2RGB
); );
break; break;
case PIX_FMT_BGR24: case PIX_FMT_BGR24:
asm volatile( __asm__ volatile(
FULL_YSCALEYUV2RGB FULL_YSCALEYUV2RGB
...@@ -1293,7 +1293,7 @@ FULL_YSCALEYUV2RGB ...@@ -1293,7 +1293,7 @@ FULL_YSCALEYUV2RGB
); );
break; break;
case PIX_FMT_BGR555: case PIX_FMT_BGR555:
asm volatile( __asm__ volatile(
FULL_YSCALEYUV2RGB FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP #ifdef DITHER1XBPP
...@@ -1326,7 +1326,7 @@ FULL_YSCALEYUV2RGB ...@@ -1326,7 +1326,7 @@ FULL_YSCALEYUV2RGB
); );
break; break;
case PIX_FMT_BGR565: case PIX_FMT_BGR565:
asm volatile( __asm__ volatile(
FULL_YSCALEYUV2RGB FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP #ifdef DITHER1XBPP
...@@ -1434,7 +1434,7 @@ FULL_YSCALEYUV2RGB ...@@ -1434,7 +1434,7 @@ FULL_YSCALEYUV2RGB
{ {
//Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
case PIX_FMT_RGB32: case PIX_FMT_RGB32:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1448,7 +1448,7 @@ FULL_YSCALEYUV2RGB ...@@ -1448,7 +1448,7 @@ FULL_YSCALEYUV2RGB
); );
return; return;
case PIX_FMT_BGR24: case PIX_FMT_BGR24:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1461,7 +1461,7 @@ FULL_YSCALEYUV2RGB ...@@ -1461,7 +1461,7 @@ FULL_YSCALEYUV2RGB
); );
return; return;
case PIX_FMT_RGB555: case PIX_FMT_RGB555:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1482,7 +1482,7 @@ FULL_YSCALEYUV2RGB ...@@ -1482,7 +1482,7 @@ FULL_YSCALEYUV2RGB
); );
return; return;
case PIX_FMT_RGB565: case PIX_FMT_RGB565:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1502,7 +1502,7 @@ FULL_YSCALEYUV2RGB ...@@ -1502,7 +1502,7 @@ FULL_YSCALEYUV2RGB
); );
return; return;
case PIX_FMT_YUYV422: case PIX_FMT_YUYV422:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1546,7 +1546,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1546,7 +1546,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
switch(dstFormat) switch(dstFormat)
{ {
case PIX_FMT_RGB32: case PIX_FMT_RGB32:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1560,7 +1560,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1560,7 +1560,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
); );
return; return;
case PIX_FMT_BGR24: case PIX_FMT_BGR24:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1574,7 +1574,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1574,7 +1574,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
); );
return; return;
case PIX_FMT_RGB555: case PIX_FMT_RGB555:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1594,7 +1594,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1594,7 +1594,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
); );
return; return;
case PIX_FMT_RGB565: case PIX_FMT_RGB565:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1615,7 +1615,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1615,7 +1615,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
); );
return; return;
case PIX_FMT_YUYV422: case PIX_FMT_YUYV422:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1635,7 +1635,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1635,7 +1635,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
switch(dstFormat) switch(dstFormat)
{ {
case PIX_FMT_RGB32: case PIX_FMT_RGB32:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1649,7 +1649,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1649,7 +1649,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
); );
return; return;
case PIX_FMT_BGR24: case PIX_FMT_BGR24:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1663,7 +1663,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1663,7 +1663,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
); );
return; return;
case PIX_FMT_RGB555: case PIX_FMT_RGB555:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1683,7 +1683,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1683,7 +1683,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
); );
return; return;
case PIX_FMT_RGB565: case PIX_FMT_RGB565:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1704,7 +1704,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1704,7 +1704,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
); );
return; return;
case PIX_FMT_YUYV422: case PIX_FMT_YUYV422:
asm volatile( __asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t" "mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t" "push %%"REG_BP" \n\t"
...@@ -1734,7 +1734,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t * ...@@ -1734,7 +1734,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( __asm__ volatile(
"movq "MANGLE(bm01010101)", %%mm2 \n\t" "movq "MANGLE(bm01010101)", %%mm2 \n\t"
"mov %0, %%"REG_a" \n\t" "mov %0, %%"REG_a" \n\t"
"1: \n\t" "1: \n\t"
...@@ -1759,7 +1759,7 @@ static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint3 ...@@ -1759,7 +1759,7 @@ static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint3
static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( __asm__ volatile(
"movq "MANGLE(bm01010101)", %%mm4 \n\t" "movq "MANGLE(bm01010101)", %%mm4 \n\t"
"mov %0, %%"REG_a" \n\t" "mov %0, %%"REG_a" \n\t"
"1: \n\t" "1: \n\t"
...@@ -1796,7 +1796,7 @@ static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, ...@@ -1796,7 +1796,7 @@ static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1,
static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( __asm__ volatile(
"mov %0, %%"REG_a" \n\t" "mov %0, %%"REG_a" \n\t"
"1: \n\t" "1: \n\t"
"movq (%1, %%"REG_a",2), %%mm0 \n\t" "movq (%1, %%"REG_a",2), %%mm0 \n\t"
...@@ -1820,7 +1820,7 @@ static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint3 ...@@ -1820,7 +1820,7 @@ static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint3
static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
{ {
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile( __asm__ volatile(
"movq "MANGLE(bm01010101)", %%mm4 \n\t" "movq "MANGLE(bm01010101)", %%mm4 \n\t"
"mov %0, %%"REG_a" \n\t" "mov %0, %%"REG_a" \n\t"
"1: \n\t" "1: \n\t"
...@@ -1917,20 +1917,20 @@ static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width, ...@@ -1917,20 +1917,20 @@ static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width,
{ {
if(srcFormat == PIX_FMT_BGR24){ if(srcFormat == PIX_FMT_BGR24){
asm volatile( __asm__ volatile(
"movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t" "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
"movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t" "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
: :
); );
}else{ }else{
asm volatile( __asm__ volatile(
"movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t" "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
"movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t" "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
: :
); );
} }
asm volatile( __asm__ volatile(
"movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t" "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
"mov %2, %%"REG_a" \n\t" "mov %2, %%"REG_a" \n\t"
"pxor %%mm7, %%mm7 \n\t" "pxor %%mm7, %%mm7 \n\t"
...@@ -1968,7 +1968,7 @@ static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width, ...@@ -1968,7 +1968,7 @@ static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width,
static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat) static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat)
{ {
asm volatile( __asm__ volatile(
"movq 24+%4, %%mm6 \n\t" "movq 24+%4, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t" "mov %3, %%"REG_a" \n\t"
"pxor %%mm7, %%mm7 \n\t" "pxor %%mm7, %%mm7 \n\t"
...@@ -2184,7 +2184,7 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW ...@@ -2184,7 +2184,7 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
filter-= counter*2; filter-= counter*2;
filterPos-= counter/2; filterPos-= counter/2;
dst-= counter/2; dst-= counter/2;
asm volatile( __asm__ volatile(
#if defined(PIC) #if defined(PIC)
"push %%"REG_b" \n\t" "push %%"REG_b" \n\t"
#endif #endif
...@@ -2230,7 +2230,7 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW ...@@ -2230,7 +2230,7 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
filter-= counter*4; filter-= counter*4;
filterPos-= counter/2; filterPos-= counter/2;
dst-= counter/2; dst-= counter/2;
asm volatile( __asm__ volatile(
#if defined(PIC) #if defined(PIC)
"push %%"REG_b" \n\t" "push %%"REG_b" \n\t"
#endif #endif
...@@ -2288,7 +2288,7 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW ...@@ -2288,7 +2288,7 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
//filter-= counter*filterSize/2; //filter-= counter*filterSize/2;
filterPos-= counter/2; filterPos-= counter/2;
dst-= counter/2; dst-= counter/2;
asm volatile( __asm__ volatile(
"pxor %%mm7, %%mm7 \n\t" "pxor %%mm7, %%mm7 \n\t"
ASMALIGN(4) ASMALIGN(4)
"1: \n\t" "1: \n\t"
...@@ -2456,7 +2456,7 @@ static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, ...@@ -2456,7 +2456,7 @@ static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth,
#endif #endif
if (canMMX2BeUsed) if (canMMX2BeUsed)
{ {
asm volatile( __asm__ volatile(
#if defined(PIC) #if defined(PIC)
"mov %%"REG_b", %5 \n\t" "mov %%"REG_b", %5 \n\t"
#endif #endif
...@@ -2521,7 +2521,7 @@ FUNNY_Y_CODE ...@@ -2521,7 +2521,7 @@ FUNNY_Y_CODE
long xInc_shr16 = xInc >> 16; long xInc_shr16 = xInc >> 16;
uint16_t xInc_mask = xInc & 0xffff; uint16_t xInc_mask = xInc & 0xffff;
//NO MMX just normal asm ... //NO MMX just normal asm ...
asm volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" // i "xor %%"REG_a", %%"REG_a" \n\t" // i
"xor %%"REG_d", %%"REG_d" \n\t" // xx "xor %%"REG_d", %%"REG_d" \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha "xorl %%ecx, %%ecx \n\t" // 2*xalpha
...@@ -2729,7 +2729,7 @@ inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, ...@@ -2729,7 +2729,7 @@ inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth,
#endif #endif
if (canMMX2BeUsed) if (canMMX2BeUsed)
{ {
asm volatile( __asm__ volatile(
#if defined(PIC) #if defined(PIC)
"mov %%"REG_b", %6 \n\t" "mov %%"REG_b", %6 \n\t"
#endif #endif
...@@ -2806,7 +2806,7 @@ FUNNY_UV_CODE ...@@ -2806,7 +2806,7 @@ FUNNY_UV_CODE
#endif /* HAVE_MMX2 */ #endif /* HAVE_MMX2 */
long xInc_shr16 = (long) (xInc >> 16); long xInc_shr16 = (long) (xInc >> 16);
uint16_t xInc_mask = xInc & 0xffff; uint16_t xInc_mask = xInc & 0xffff;
asm volatile( __asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" // i "xor %%"REG_a", %%"REG_a" \n\t" // i
"xor %%"REG_d", %%"REG_d" \n\t" // xx "xor %%"REG_d", %%"REG_d" \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha "xorl %%ecx, %%ecx \n\t" // 2*xalpha
...@@ -3256,8 +3256,8 @@ static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int s ...@@ -3256,8 +3256,8 @@ static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int s
} }
#ifdef HAVE_MMX #ifdef HAVE_MMX
asm volatile(SFENCE:::"memory"); __asm__ volatile(SFENCE:::"memory");
asm volatile(EMMS:::"memory"); __asm__ volatile(EMMS:::"memory");
#endif #endif
/* store changed local vars back in the context */ /* store changed local vars back in the context */
c->dstY= dstY; c->dstY= dstY;
......
...@@ -133,7 +133,7 @@ static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStr ...@@ -133,7 +133,7 @@ static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStr
h_size= (c->dstW+7)&~7; h_size= (c->dstW+7)&~7;
if(h_size*2 > FFABS(dstStride[0])) h_size-=8; if(h_size*2 > FFABS(dstStride[0])) h_size-=8;
asm volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ );
//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&c->blueDither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0], //printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&c->blueDither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
//srcStride[0],srcStride[1],srcStride[2],dstStride[0]); //srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
for (y= 0; y<srcSliceH; y++ ) { for (y= 0; y<srcSliceH; y++ ) {
...@@ -148,7 +148,7 @@ static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStr ...@@ -148,7 +148,7 @@ static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStr
c->redDither= ff_dither8[(y+1)&1]; c->redDither= ff_dither8[(y+1)&1];
/* This MMX assembly code deals with a SINGLE scan line at a time, /* This MMX assembly code deals with a SINGLE scan line at a time,
* it converts 8 pixels in each iteration. */ * it converts 8 pixels in each iteration. */
asm volatile ( __asm__ volatile (
/* load data for start of next scan line */ /* load data for start of next scan line */
"movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
"movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
...@@ -210,7 +210,7 @@ YUV2RGB ...@@ -210,7 +210,7 @@ YUV2RGB
); );
} }
asm volatile (EMMS); __asm__ volatile (EMMS);
return srcSliceH; return srcSliceH;
} }
...@@ -227,7 +227,7 @@ static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStr ...@@ -227,7 +227,7 @@ static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStr
h_size= (c->dstW+7)&~7; h_size= (c->dstW+7)&~7;
if(h_size*2 > FFABS(dstStride[0])) h_size-=8; if(h_size*2 > FFABS(dstStride[0])) h_size-=8;
asm volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ );
//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&c->blueDither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0], //printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&c->blueDither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
//srcStride[0],srcStride[1],srcStride[2],dstStride[0]); //srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
for (y= 0; y<srcSliceH; y++ ) { for (y= 0; y<srcSliceH; y++ ) {
...@@ -242,7 +242,7 @@ static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStr ...@@ -242,7 +242,7 @@ static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStr
c->redDither= ff_dither8[(y+1)&1]; c->redDither= ff_dither8[(y+1)&1];
/* This MMX assembly code deals with a SINGLE scan line at a time, /* This MMX assembly code deals with a SINGLE scan line at a time,
* it converts 8 pixels in each iteration. */ * it converts 8 pixels in each iteration. */
asm volatile ( __asm__ volatile (
/* load data for start of next scan line */ /* load data for start of next scan line */
"movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
"movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
...@@ -299,7 +299,7 @@ YUV2RGB ...@@ -299,7 +299,7 @@ YUV2RGB
); );
} }
asm volatile (EMMS); __asm__ volatile (EMMS);
return srcSliceH; return srcSliceH;
} }
...@@ -315,7 +315,7 @@ static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStr ...@@ -315,7 +315,7 @@ static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStr
h_size= (c->dstW+7)&~7; h_size= (c->dstW+7)&~7;
if(h_size*3 > FFABS(dstStride[0])) h_size-=8; if(h_size*3 > FFABS(dstStride[0])) h_size-=8;
asm volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ );
for (y= 0; y<srcSliceH; y++ ) { for (y= 0; y<srcSliceH; y++ ) {
uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0]; uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0];
...@@ -326,7 +326,7 @@ static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStr ...@@ -326,7 +326,7 @@ static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStr
/* This MMX assembly code deals with a SINGLE scan line at a time, /* This MMX assembly code deals with a SINGLE scan line at a time,
* it converts 8 pixels in each iteration. */ * it converts 8 pixels in each iteration. */
asm volatile ( __asm__ volatile (
/* load data for start of next scan line */ /* load data for start of next scan line */
"movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
"movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
...@@ -445,7 +445,7 @@ YUV2RGB ...@@ -445,7 +445,7 @@ YUV2RGB
); );
} }
asm volatile (EMMS); __asm__ volatile (EMMS);
return srcSliceH; return srcSliceH;
} }
...@@ -461,7 +461,7 @@ static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStr ...@@ -461,7 +461,7 @@ static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStr
h_size= (c->dstW+7)&~7; h_size= (c->dstW+7)&~7;
if(h_size*4 > FFABS(dstStride[0])) h_size-=8; if(h_size*4 > FFABS(dstStride[0])) h_size-=8;
asm volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ );
for (y= 0; y<srcSliceH; y++ ) { for (y= 0; y<srcSliceH; y++ ) {
uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0]; uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0];
...@@ -472,7 +472,7 @@ static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStr ...@@ -472,7 +472,7 @@ static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStr
/* This MMX assembly code deals with a SINGLE scan line at a time, /* This MMX assembly code deals with a SINGLE scan line at a time,
* it converts 8 pixels in each iteration. */ * it converts 8 pixels in each iteration. */
asm volatile ( __asm__ volatile (
/* load data for start of next scan line */ /* load data for start of next scan line */
"movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
"movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
...@@ -531,6 +531,6 @@ YUV2RGB ...@@ -531,6 +531,6 @@ YUV2RGB
); );
} }
asm volatile (EMMS); __asm__ volatile (EMMS);
return srcSliceH; return srcSliceH;
} }
...@@ -85,7 +85,7 @@ static int vis_420P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int s ...@@ -85,7 +85,7 @@ static int vis_420P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int s
int y, out1, out2, out3, out4, out5, out6; int y, out1, out2, out3, out4, out5, out6;
for(y=0;y < srcSliceH;++y) { for(y=0;y < srcSliceH;++y) {
asm volatile ( __asm__ volatile (
YUV2RGB_INIT YUV2RGB_INIT
"wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */ "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */
"1: \n\t" "1: \n\t"
...@@ -136,7 +136,7 @@ static int vis_422P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int s ...@@ -136,7 +136,7 @@ static int vis_422P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int s
int y, out1, out2, out3, out4, out5, out6; int y, out1, out2, out3, out4, out5, out6;
for(y=0;y < srcSliceH;++y) { for(y=0;y < srcSliceH;++y) {
asm volatile ( __asm__ volatile (
YUV2RGB_INIT YUV2RGB_INIT
"wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */ "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */
"1: \n\t" "1: \n\t"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment