Commit fcfc14b3 authored by diego

cosmetics: __asm__ __volatile__ --> asm volatile


git-svn-id: file:///var/local/repositories/ffmpeg/trunk@12885 9553f0bf-9b14-0410-a0b8-cfaf0461ba5b
parent 9de14f11
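The change is purely mechanical: both spellings are GNU C extensions and behave identically, so only the shorter keyword form is kept. A minimal, hypothetical sketch of the rename (not taken from any file in this diff, and assuming a GNU-mode compiler such as gcc with -std=gnu99):

/* Illustrative only, not part of the commit: the same compiler barrier
 * written with the spelling used before and after this change. */
static inline void compiler_barrier_before(void)
{
    __asm__ __volatile__ ("" ::: "memory");   /* spelling before this commit */
}

static inline void compiler_barrier_after(void)
{
    asm volatile ("" ::: "memory");           /* spelling after this commit */
}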
@@ -22,7 +22,7 @@
 #include "dsputil.h"
 #define DEF(x, y) x ## _no_rnd_ ## y ##_iwmmxt
-#define SET_RND(regd) __asm__ __volatile__ ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12");
+#define SET_RND(regd) asm volatile ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12");
 #define WAVG2B "wavg2b"
 #include "dsputil_iwmmxt_rnd.h"
 #undef DEF
@@ -30,7 +30,7 @@
 #undef WAVG2B
 #define DEF(x, y) x ## _ ## y ##_iwmmxt
-#define SET_RND(regd) __asm__ __volatile__ ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12");
+#define SET_RND(regd) asm volatile ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12");
 #define WAVG2B "wavg2br"
 #include "dsputil_iwmmxt_rnd.h"
 #undef DEF
@@ -89,7 +89,7 @@ void add_pixels_clamped_iwmmxt(const DCTELEM *block, uint8_t *pixels, int line_s
 {
 uint8_t *pixels2 = pixels + line_size;
-__asm__ __volatile__ (
+asm volatile (
 "mov r12, #4 \n\t"
 "1: \n\t"
 "pld [%[pixels], %[line_size2]] \n\t"
...
@@ -26,7 +26,7 @@
 void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
 {
 int stride = line_size;
-__asm__ __volatile__ (
+asm volatile (
 "and r12, %[pixels], #7 \n\t"
 "bic %[pixels], %[pixels], #7 \n\t"
 "tmcr wcgr1, r12 \n\t"
@@ -60,7 +60,7 @@ void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_siz
 void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
 {
 int stride = line_size;
-__asm__ __volatile__ (
+asm volatile (
 "and r12, %[pixels], #7 \n\t"
 "bic %[pixels], %[pixels], #7 \n\t"
 "tmcr wcgr1, r12 \n\t"
@@ -102,7 +102,7 @@ void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_siz
 void DEF(put, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
 {
 int stride = line_size;
-__asm__ __volatile__ (
+asm volatile (
 "and r12, %[pixels], #7 \n\t"
 "bic %[pixels], %[pixels], #7 \n\t"
 "tmcr wcgr1, r12 \n\t"
@@ -142,7 +142,7 @@ void DEF(put, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_si
 void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
 {
 int stride = line_size;
-__asm__ __volatile__ (
+asm volatile (
 "pld [%[pixels]] \n\t"
 "pld [%[pixels], #32] \n\t"
 "pld [%[block]] \n\t"
@@ -201,7 +201,7 @@ void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, const int line_
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
 SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "pld [%[pixels], #32] \n\t"
 "and r12, %[pixels], #7 \n\t"
@@ -250,7 +250,7 @@ void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, const int line
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
 SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "pld [%[pixels], #32] \n\t"
 "and r12, %[pixels], #7 \n\t"
@@ -311,7 +311,7 @@ void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, const int line_
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
 SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "pld [%[pixels], #32] \n\t"
 "pld [%[block]] \n\t"
@@ -372,7 +372,7 @@ void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, const int line
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
 SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "pld [%[pixels], #32] \n\t"
 "pld [%[block]] \n\t"
@@ -448,7 +448,7 @@ void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, const int line_
 int stride = line_size;
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "pld [%[pixels], #32] \n\t"
 "and r12, %[pixels], #7 \n\t"
@@ -502,7 +502,7 @@ void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, const int line
 int stride = line_size;
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "pld [%[pixels], #32] \n\t"
 "and r12, %[pixels], #7 \n\t"
@@ -559,7 +559,7 @@ void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, const int line
 int stride = line_size;
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "pld [%[pixels], #32] \n\t"
 "and r12, %[pixels], #7 \n\t"
@@ -627,7 +627,7 @@ void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, const int line
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
 SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "mov r12, #2 \n\t"
 "pld [%[pixels], #32] \n\t"
@@ -721,7 +721,7 @@ void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, const int lin
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
 SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
-__asm__ __volatile__(
+asm volatile(
 "pld [%[pixels]] \n\t"
 "mov r12, #2 \n\t"
 "pld [%[pixels], #32] \n\t"
@@ -863,7 +863,7 @@ void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, const int line
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
 SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
-__asm__ __volatile__(
+asm volatile(
 "pld [%[block]] \n\t"
 "pld [%[block], #32] \n\t"
 "pld [%[pixels]] \n\t"
@@ -967,7 +967,7 @@ void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, const int lin
 // [wr0 wr1 wr2 wr3] for previous line
 // [wr4 wr5 wr6 wr7] for current line
 SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
-__asm__ __volatile__(
+asm volatile(
 "pld [%[block]] \n\t"
 "pld [%[block], #32] \n\t"
 "pld [%[pixels]] \n\t"
...
@@ -65,7 +65,7 @@ static inline void dct_unquantize_h263_helper_c(DCTELEM *block, int qmul, int qa
 ({ DCTELEM *xblock = xxblock; \
 int xqmul = xxqmul, xqadd = xxqadd, xcount = xxcount, xtmp; \
 int xdata1, xdata2; \
-__asm__ __volatile__( \
+asm volatile( \
 "subs %[count], %[count], #2 \n\t" \
 "ble 2f \n\t" \
 "ldrd r4, [%[block], #0] \n\t" \
...
@@ -48,7 +48,7 @@ static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s,
 else
 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
-__asm__ __volatile__ (
+asm volatile (
 /* "movd %1, %%mm6 \n\t" //qmul */
 /* "packssdw %%mm6, %%mm6 \n\t" */
 /* "packssdw %%mm6, %%mm6 \n\t" */
...
@@ -44,7 +44,7 @@ int mm_support(void)
 int max_std_level, max_ext_level, std_caps=0, ext_caps=0;
 long a, c;
-__asm__ __volatile__ (
+asm volatile (
 /* See if CPUID instruction is supported ... */
 /* ... Get copies of EFLAGS into eax and ecx */
 "pushf\n\t"
...
@@ -43,25 +43,25 @@ typedef union {
 #define mmx_i2r(op,imm,reg) \
-__asm__ __volatile__ (#op " %0, %%" #reg \
+asm volatile (#op " %0, %%" #reg \
 : /* nothing */ \
 : "i" (imm) )
 #define mmx_m2r(op,mem,reg) \
-__asm__ __volatile__ (#op " %0, %%" #reg \
+asm volatile (#op " %0, %%" #reg \
 : /* nothing */ \
 : "m" (mem))
 #define mmx_r2m(op,reg,mem) \
-__asm__ __volatile__ (#op " %%" #reg ", %0" \
+asm volatile (#op " %%" #reg ", %0" \
 : "=m" (mem) \
 : /* nothing */ )
 #define mmx_r2r(op,regs,regd) \
-__asm__ __volatile__ (#op " %" #regs ", %" #regd)
+asm volatile (#op " %" #regs ", %" #regd)
-#define emms() __asm__ __volatile__ ("emms")
+#define emms() asm volatile ("emms")
 #define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
 #define movd_r2m(reg,var) mmx_r2m (movd, reg, var)
@@ -200,16 +200,16 @@ typedef union {
 #define mmx_m2ri(op,mem,reg,imm) \
-__asm__ __volatile__ (#op " %1, %0, %%" #reg \
+asm volatile (#op " %1, %0, %%" #reg \
 : /* nothing */ \
 : "m" (mem), "i" (imm))
 #define mmx_r2ri(op,regs,regd,imm) \
-__asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \
+asm volatile (#op " %0, %%" #regs ", %%" #regd \
 : /* nothing */ \
 : "i" (imm) )
 #define mmx_fetch(mem,hint) \
-__asm__ __volatile__ ("prefetch" #hint " %0" \
+asm volatile ("prefetch" #hint " %0" \
 : /* nothing */ \
 : "m" (mem))
@@ -240,7 +240,7 @@ typedef union {
 #define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd)
 #define pmovmskb(mmreg,reg) \
-__asm__ __volatile__ ("movmskps %" #mmreg ", %" #reg)
+asm volatile ("movmskps %" #mmreg ", %" #reg)
 #define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg)
 #define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd)
@@ -256,7 +256,7 @@ typedef union {
 #define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm)
 #define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm)
-#define sfence() __asm__ __volatile__ ("sfence\n\t")
+#define sfence() asm volatile ("sfence\n\t")
 /* SSE2 */
 #define pshufhw_m2r(var,reg,imm) mmx_m2ri(pshufhw, var, reg, imm)
...
@@ -257,7 +257,7 @@ static short consttable[] align16 = {
 pmaxh($2, $0, $2); \
 ppacb($0, $2, $2); \
 sd3(2, 0, 4); \
-__asm__ __volatile__ ("add $4, $5, $4");
+asm volatile ("add $4, $5, $4");
 #define DCT_8_INV_COL8_PUT() \
 PUT($16); \
@@ -277,7 +277,7 @@ static short consttable[] align16 = {
 pmaxh($2, $0, $2); \
 ppacb($0, $2, $2); \
 sd3(2, 0, 4); \
-__asm__ __volatile__ ("add $4, $5, $4");
+asm volatile ("add $4, $5, $4");
 /*fixme: schedule*/
 #define DCT_8_INV_COL8_ADD() \
@@ -294,7 +294,7 @@ static short consttable[] align16 = {
 void ff_mmi_idct(int16_t * block)
 {
 /* $4 = block */
-__asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+asm volatile("la $24, %0"::"m"(consttable[0]));
 lq($24, ROUNDER_0, $8);
 lq($24, ROUNDER_1, $7);
 DCT_8_INV_ROW1($4, 0, TAB_i_04, $8, $8);
@@ -309,14 +309,14 @@ void ff_mmi_idct(int16_t * block)
 DCT_8_INV_COL8_STORE($4);
 //let savedtemp regs be saved
-__asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+asm volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
 }
 void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
 {
 /* $4 = dest, $5 = line_size, $6 = block */
-__asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+asm volatile("la $24, %0"::"m"(consttable[0]));
 lq($24, ROUNDER_0, $8);
 lq($24, ROUNDER_1, $7);
 DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
@@ -333,14 +333,14 @@ void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
 DCT_8_INV_COL8_PUT();
 //let savedtemp regs be saved
-__asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+asm volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
 }
 void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
 {
 /* $4 = dest, $5 = line_size, $6 = block */
-__asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+asm volatile("la $24, %0"::"m"(consttable[0]));
 lq($24, ROUNDER_0, $8);
 lq($24, ROUNDER_1, $7);
 DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
@@ -357,6 +357,6 @@ void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
 DCT_8_INV_COL8_ADD();
 //let savedtemp regs be saved
-__asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+asm volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
 }
@@ -60,112 +60,112 @@
 #define lq(base, off, reg) \
-__asm__ __volatile__ ("lq " #reg ", %0("#base ")" : : "i" (off) )
+asm volatile ("lq " #reg ", %0("#base ")" : : "i" (off) )
 #define lq2(mem, reg) \
-__asm__ __volatile__ ("lq " #reg ", %0" : : "r" (mem))
+asm volatile ("lq " #reg ", %0" : : "r" (mem))
 #define sq(reg, off, base) \
-__asm__ __volatile__ ("sq " #reg ", %0("#base ")" : : "i" (off) )
+asm volatile ("sq " #reg ", %0("#base ")" : : "i" (off) )
 /*
 #define ld(base, off, reg) \
-__asm__ __volatile__ ("ld " #reg ", " #off "("#base ")")
+asm volatile ("ld " #reg ", " #off "("#base ")")
 */
 #define ld3(base, off, reg) \
-__asm__ __volatile__ (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))
+asm volatile (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))
 #define ldr3(base, off, reg) \
-__asm__ __volatile__ (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))
+asm volatile (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))
 #define ldl3(base, off, reg) \
-__asm__ __volatile__ (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))
+asm volatile (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))
 /*
 #define sd(reg, off, base) \
-__asm__ __volatile__ ("sd " #reg ", " #off "("#base ")")
+asm volatile ("sd " #reg ", " #off "("#base ")")
 */
 //seems assembler has bug encoding mnemonic 'sd', so DIY
 #define sd3(reg, off, base) \
-__asm__ __volatile__ (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))
+asm volatile (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))
 #define sw(reg, off, base) \
-__asm__ __volatile__ ("sw " #reg ", " #off "("#base ")")
+asm volatile ("sw " #reg ", " #off "("#base ")")
 #define sq2(reg, mem) \
-__asm__ __volatile__ ("sq " #reg ", %0" : : "m" (*(mem)))
+asm volatile ("sq " #reg ", %0" : : "m" (*(mem)))
 #define pinth(rs, rt, rd) \
-__asm__ __volatile__ ("pinth " #rd ", " #rs ", " #rt )
+asm volatile ("pinth " #rd ", " #rs ", " #rt )
 #define phmadh(rs, rt, rd) \
-__asm__ __volatile__ ("phmadh " #rd ", " #rs ", " #rt )
+asm volatile ("phmadh " #rd ", " #rs ", " #rt )
 #define pcpyud(rs, rt, rd) \
-__asm__ __volatile__ ("pcpyud " #rd ", " #rs ", " #rt )
+asm volatile ("pcpyud " #rd ", " #rs ", " #rt )
 #define pcpyld(rs, rt, rd) \
-__asm__ __volatile__ ("pcpyld " #rd ", " #rs ", " #rt )
+asm volatile ("pcpyld " #rd ", " #rs ", " #rt )
 #define pcpyh(rt, rd) \
-__asm__ __volatile__ ("pcpyh " #rd ", " #rt )
+asm volatile ("pcpyh " #rd ", " #rt )
 #define paddw(rs, rt, rd) \
-__asm__ __volatile__ ("paddw " #rd ", " #rs ", " #rt )
+asm volatile ("paddw " #rd ", " #rs ", " #rt )
 #define pextlw(rs, rt, rd) \
-__asm__ __volatile__ ("pextlw " #rd ", " #rs ", " #rt )
+asm volatile ("pextlw " #rd ", " #rs ", " #rt )
 #define pextuw(rs, rt, rd) \
-__asm__ __volatile__ ("pextuw " #rd ", " #rs ", " #rt )
+asm volatile ("pextuw " #rd ", " #rs ", " #rt )
 #define pextlh(rs, rt, rd) \
-__asm__ __volatile__ ("pextlh " #rd ", " #rs ", " #rt )
+asm volatile ("pextlh " #rd ", " #rs ", " #rt )
 #define pextuh(rs, rt, rd) \
-__asm__ __volatile__ ("pextuh " #rd ", " #rs ", " #rt )
+asm volatile ("pextuh " #rd ", " #rs ", " #rt )
 #define psubw(rs, rt, rd) \
-__asm__ __volatile__ ("psubw " #rd ", " #rs ", " #rt )
+asm volatile ("psubw " #rd ", " #rs ", " #rt )
 #define psraw(rt, sa, rd) \
-__asm__ __volatile__ ("psraw " #rd ", " #rt ", %0" : : "i"(sa) )
+asm volatile ("psraw " #rd ", " #rt ", %0" : : "i"(sa) )
 #define ppach(rs, rt, rd) \
-__asm__ __volatile__ ("ppach " #rd ", " #rs ", " #rt )
+asm volatile ("ppach " #rd ", " #rs ", " #rt )
 #define ppacb(rs, rt, rd) \
-__asm__ __volatile__ ("ppacb " #rd ", " #rs ", " #rt )
+asm volatile ("ppacb " #rd ", " #rs ", " #rt )
 #define prevh(rt, rd) \
-__asm__ __volatile__ ("prevh " #rd ", " #rt )
+asm volatile ("prevh " #rd ", " #rt )
 #define pmulth(rs, rt, rd) \
-__asm__ __volatile__ ("pmulth " #rd ", " #rs ", " #rt )
+asm volatile ("pmulth " #rd ", " #rs ", " #rt )
 #define pmaxh(rs, rt, rd) \
-__asm__ __volatile__ ("pmaxh " #rd ", " #rs ", " #rt )
+asm volatile ("pmaxh " #rd ", " #rs ", " #rt )
 #define pminh(rs, rt, rd) \
-__asm__ __volatile__ ("pminh " #rd ", " #rs ", " #rt )
+asm volatile ("pminh " #rd ", " #rs ", " #rt )
 #define pinteh(rs, rt, rd) \
-__asm__ __volatile__ ("pinteh " #rd ", " #rs ", " #rt )
+asm volatile ("pinteh " #rd ", " #rs ", " #rt )
 #define paddh(rs, rt, rd) \
-__asm__ __volatile__ ("paddh " #rd ", " #rs ", " #rt )
+asm volatile ("paddh " #rd ", " #rs ", " #rt )
 #define psubh(rs, rt, rd) \
-__asm__ __volatile__ ("psubh " #rd ", " #rs ", " #rt )
+asm volatile ("psubh " #rd ", " #rs ", " #rt )
 #define psrah(rt, sa, rd) \
-__asm__ __volatile__ ("psrah " #rd ", " #rt ", %0" : : "i"(sa) )
+asm volatile ("psrah " #rd ", " #rt ", %0" : : "i"(sa) )
 #define pmfhl_uw(rd) \
-__asm__ __volatile__ ("pmfhl.uw " #rd)
+asm volatile ("pmfhl.uw " #rd)
 #define pextlb(rs, rt, rd) \
-__asm__ __volatile__ ("pextlb " #rd ", " #rs ", " #rt )
+asm volatile ("pextlb " #rd ", " #rs ", " #rt )
 #endif /* FFMPEG_MMI_H */
@@ -54,7 +54,7 @@ static const float odd_table[] __attribute__ ((aligned(8))) = {
 #if defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
 #define load_matrix(table) \
-__asm__ volatile( \
+asm volatile( \
 " fschg\n" \
 " fmov @%0+,xd0\n" \
 " fmov @%0+,xd2\n" \
@@ -71,15 +71,15 @@ static const float odd_table[] __attribute__ ((aligned(8))) = {
 )
 #define ftrv() \
-__asm__ volatile("ftrv xmtrx,fv0" \
+asm volatile("ftrv xmtrx,fv0" \
 : "=f"(fr0),"=f"(fr1),"=f"(fr2),"=f"(fr3) \
 : "0"(fr0), "1"(fr1), "2"(fr2), "3"(fr3) );
 #define DEFREG \
-register float fr0 __asm__("fr0"); \
+register float fr0 asm("fr0"); \
-register float fr1 __asm__("fr1"); \
+register float fr1 asm("fr1"); \
-register float fr2 __asm__("fr2"); \
+register float fr2 asm("fr2"); \
-register float fr3 __asm__("fr3")
+register float fr3 asm("fr3")
 #else
...
@@ -55,97 +55,97 @@
 #define vis_rd_d(X) (vis_dreg(X) << 25)
 #define vis_ss2s(opf,rs1,rs2,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs1_s(rs1) | \
 vis_rs2_s(rs2) | \
 vis_rd_s(rd)))
 #define vis_dd2d(opf,rs1,rs2,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs1_d(rs1) | \
 vis_rs2_d(rs2) | \
 vis_rd_d(rd)))
 #define vis_ss2d(opf,rs1,rs2,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs1_s(rs1) | \
 vis_rs2_s(rs2) | \
 vis_rd_d(rd)))
 #define vis_sd2d(opf,rs1,rs2,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs1_s(rs1) | \
 vis_rs2_d(rs2) | \
 vis_rd_d(rd)))
 #define vis_d2s(opf,rs2,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs2_d(rs2) | \
 vis_rd_s(rd)))
 #define vis_s2d(opf,rs2,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs2_s(rs2) | \
 vis_rd_d(rd)))
 #define vis_d12d(opf,rs1,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs1_d(rs1) | \
 vis_rd_d(rd)))
 #define vis_d22d(opf,rs2,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs2_d(rs2) | \
 vis_rd_d(rd)))
 #define vis_s12s(opf,rs1,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs1_s(rs1) | \
 vis_rd_s(rd)))
 #define vis_s22s(opf,rs2,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rs2_s(rs2) | \
 vis_rd_s(rd)))
 #define vis_s(opf,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rd_s(rd)))
 #define vis_d(opf,rd) \
-__asm__ __volatile__ (".word %0" \
+asm volatile (".word %0" \
 : : "i" (vis_opc_base | vis_opf(opf) | \
 vis_rd_d(rd)))
 #define vis_r2m(op,rd,mem) \
-__asm__ __volatile__ (#op "\t%%f" #rd ", [%0]" : : "r" (&(mem)) )
+asm volatile (#op "\t%%f" #rd ", [%0]" : : "r" (&(mem)) )
 #define vis_r2m_2(op,rd,mem1,mem2) \
-__asm__ __volatile__ (#op "\t%%f" #rd ", [%0 + %1]" : : "r" (mem1), "r" (mem2) )
+asm volatile (#op "\t%%f" #rd ", [%0 + %1]" : : "r" (mem1), "r" (mem2) )
 #define vis_m2r(op,mem,rd) \
-__asm__ __volatile__ (#op "\t[%0], %%f" #rd : : "r" (&(mem)) )
+asm volatile (#op "\t[%0], %%f" #rd : : "r" (&(mem)) )
 #define vis_m2r_2(op,mem1,mem2,rd) \
-__asm__ __volatile__ (#op "\t[%0 + %1], %%f" #rd : : "r" (mem1), "r" (mem2) )
+asm volatile (#op "\t[%0 + %1], %%f" #rd : : "r" (mem1), "r" (mem2) )
 static inline void vis_set_gsr(unsigned int _val)
 {
 register unsigned int val asm("g1");
 val = _val;
-__asm__ __volatile__(".word 0xa7804000"
+asm volatile(".word 0xa7804000"
 : : "r" (val));
 }
@@ -166,7 +166,7 @@ static inline void vis_set_gsr(unsigned int _val)
 #define vis_ldblk(mem, rd) \
 do { register void *__mem asm("g1"); \
 __mem = &(mem); \
-__asm__ __volatile__(".word 0xc1985e00 | %1" \
+asm volatile(".word 0xc1985e00 | %1" \
 : \
 : "r" (__mem), \
 "i" (vis_rd_d(rd)) \
@@ -176,7 +176,7 @@ do { register void *__mem asm("g1"); \
 #define vis_stblk(rd, mem) \
 do { register void *__mem asm("g1"); \
 __mem = &(mem); \
-__asm__ __volatile__(".word 0xc1b85e00 | %1" \
+asm volatile(".word 0xc1b85e00 | %1" \
 : \
 : "r" (__mem), \
 "i" (vis_rd_d(rd)) \
@@ -184,10 +184,10 @@ do { register void *__mem asm("g1"); \
 } while (0)
 #define vis_membar_storestore() \
-__asm__ __volatile__(".word 0x8143e008" : : : "memory")
+asm volatile(".word 0x8143e008" : : : "memory")
 #define vis_membar_sync() \
-__asm__ __volatile__(".word 0x8143e040" : : : "memory")
+asm volatile(".word 0x8143e040" : : : "memory")
 /* 16 and 32 bit partitioned addition and subtraction. The normal
 * versions perform 4 16-bit or 2 32-bit additions or subtractions.
@@ -230,7 +230,7 @@ static inline void *vis_alignaddr(void *_ptr)
 ptr = _ptr;
-__asm__ __volatile__(".word %2"
+asm volatile(".word %2"
 : "=&r" (ptr)
 : "0" (ptr),
 "i" (vis_opc_base | vis_opf(0x18) |
@@ -247,7 +247,7 @@ static inline void vis_alignaddr_g0(void *_ptr)
 ptr = _ptr;
-__asm__ __volatile__(".word %2"
+asm volatile(".word %2"
 : "=&r" (ptr)
 : "0" (ptr),
 "i" (vis_opc_base | vis_opf(0x18) |
@@ -262,7 +262,7 @@ static inline void *vis_alignaddrl(void *_ptr)
 ptr = _ptr;
-__asm__ __volatile__(".word %2"
+asm volatile(".word %2"
 : "=&r" (ptr)
 : "0" (ptr),
 "i" (vis_opc_base | vis_opf(0x19) |
@@ -279,7 +279,7 @@ static inline void vis_alignaddrl_g0(void *_ptr)
 ptr = _ptr;
-__asm__ __volatile__(".word %2"
+asm volatile(".word %2"
 : "=&r" (ptr)
 : "0" (ptr),
 "i" (vis_opc_base | vis_opf(0x19) |
...
@@ -341,7 +341,7 @@ static inline uint64_t read_time(void)
 uint32_t tbu, tbl, temp;
 /* from section 2.2.1 of the 32-bit PowerPC PEM */
-__asm__ __volatile__(
+asm volatile(
 "1:\n"
 "mftbu %2\n"
 "mftb %0\n"
...