xtensa: use named assembly arguments in atomic.h

Numeric assembly arguments are hard to understand and assembly code that
uses them is hard to modify. Use named arguments in ATOMIC_OP,
ATOMIC_OP_RETURN and ATOMIC_FETCH_OP macros.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
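
For readers less familiar with GCC extended asm, a named operand is declared as [name] "constraint" (expr) in the operand lists and referenced as %[name] in the template, replacing the positional %0, %1, ... numbering. A minimal sketch of the change, modeled on the S32C1I-based variant in the diff below; the helper names example_add_numeric/example_add_named are illustrative only and not part of the patch:

/* Illustration only, not from the patch: the same compare-and-swap
 * retry loop written first with positional, then with named operands.
 */
static inline void example_add_numeric(int i, int *v)
{
	int tmp, result;

	__asm__ __volatile__(
	"1:	l32i	%1, %3, 0\n"		/* which operand is which? check the lists */
	"	wsr	%1, scompare1\n"
	"	add	%0, %1, %2\n"
	"	s32c1i	%0, %3, 0\n"
	"	bne	%0, %1, 1b\n"
	: "=&a" (result), "=&a" (tmp)
	: "a" (i), "a" (v)
	: "memory");
}

static inline void example_add_named(int i, int *v)
{
	int tmp, result;

	__asm__ __volatile__(
	"1:	l32i	%[tmp], %[addr], 0\n"	/* operands are self-describing */
	"	wsr	%[tmp], scompare1\n"
	"	add	%[result], %[tmp], %[i]\n"
	"	s32c1i	%[result], %[addr], 0\n"
	"	bne	%[result], %[tmp], 1b\n"
	: [result] "=&a" (result), [tmp] "=&a" (tmp)
	: [i] "a" (i), [addr] "a" (v)
	: "memory");
}

With the named form, adding or reordering operands no longer silently changes which value each %N refers to, which is the maintainability problem the commit message points at.
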
Author: Max Filippov
Date:   2019-10-16 00:33:10 -07:00
Commit: 643d6976ff
Parent: 5bf67094a3

@@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \
 	int result; \
 	\
 	__asm__ __volatile__( \
-	"1: l32ex %1, %3\n" \
-	" " #op " %0, %1, %2\n" \
-	" s32ex %0, %3\n" \
-	" getex %0\n" \
-	" beqz %0, 1b\n" \
-	: "=&a" (result), "=&a" (tmp) \
-	: "a" (i), "a" (v) \
+	"1: l32ex %[tmp], %[addr]\n" \
+	" " #op " %[result], %[tmp], %[i]\n" \
+	" s32ex %[result], %[addr]\n" \
+	" getex %[result]\n" \
+	" beqz %[result], 1b\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "memory" \
 	); \
 } \
@@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	int result; \
 	\
 	__asm__ __volatile__( \
-	"1: l32ex %1, %3\n" \
-	" " #op " %0, %1, %2\n" \
-	" s32ex %0, %3\n" \
-	" getex %0\n" \
-	" beqz %0, 1b\n" \
-	" " #op " %0, %1, %2\n" \
-	: "=&a" (result), "=&a" (tmp) \
-	: "a" (i), "a" (v) \
+	"1: l32ex %[tmp], %[addr]\n" \
+	" " #op " %[result], %[tmp], %[i]\n" \
+	" s32ex %[result], %[addr]\n" \
+	" getex %[result]\n" \
+	" beqz %[result], 1b\n" \
+	" " #op " %[result], %[tmp], %[i]\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "memory" \
 	); \
 	\
@@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 	int result; \
 	\
 	__asm__ __volatile__( \
-	"1: l32ex %1, %3\n" \
-	" " #op " %0, %1, %2\n" \
-	" s32ex %0, %3\n" \
-	" getex %0\n" \
-	" beqz %0, 1b\n" \
-	: "=&a" (result), "=&a" (tmp) \
-	: "a" (i), "a" (v) \
+	"1: l32ex %[tmp], %[addr]\n" \
+	" " #op " %[result], %[tmp], %[i]\n" \
+	" s32ex %[result], %[addr]\n" \
+	" getex %[result]\n" \
+	" beqz %[result], 1b\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "memory" \
 	); \
 	\
@@ -124,13 +124,13 @@ static inline void atomic_##op(int i, atomic_t * v) \
 	int result; \
 	\
 	__asm__ __volatile__( \
-	"1: l32i %1, %3, 0\n" \
-	" wsr %1, scompare1\n" \
-	" " #op " %0, %1, %2\n" \
-	" s32c1i %0, %3, 0\n" \
-	" bne %0, %1, 1b\n" \
-	: "=&a" (result), "=&a" (tmp) \
-	: "a" (i), "a" (v) \
+	"1: l32i %[tmp], %[addr], 0\n" \
+	" wsr %[tmp], scompare1\n" \
+	" " #op " %[result], %[tmp], %[i]\n" \
+	" s32c1i %[result], %[addr], 0\n" \
+	" bne %[result], %[tmp], 1b\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "memory" \
 	); \
 } \
@@ -142,14 +142,14 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
 	int result; \
 	\
 	__asm__ __volatile__( \
-	"1: l32i %1, %3, 0\n" \
-	" wsr %1, scompare1\n" \
-	" " #op " %0, %1, %2\n" \
-	" s32c1i %0, %3, 0\n" \
-	" bne %0, %1, 1b\n" \
-	" " #op " %0, %0, %2\n" \
-	: "=&a" (result), "=&a" (tmp) \
-	: "a" (i), "a" (v) \
+	"1: l32i %[tmp], %[addr], 0\n" \
+	" wsr %[tmp], scompare1\n" \
+	" " #op " %[result], %[tmp], %[i]\n" \
+	" s32c1i %[result], %[addr], 0\n" \
+	" bne %[result], %[tmp], 1b\n" \
+	" " #op " %[result], %[result], %[i]\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "memory" \
 	); \
 	\
@@ -163,13 +163,13 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
 	int result; \
 	\
 	__asm__ __volatile__( \
-	"1: l32i %1, %3, 0\n" \
-	" wsr %1, scompare1\n" \
-	" " #op " %0, %1, %2\n" \
-	" s32c1i %0, %3, 0\n" \
-	" bne %0, %1, 1b\n" \
-	: "=&a" (result), "=&a" (tmp) \
-	: "a" (i), "a" (v) \
+	"1: l32i %[tmp], %[addr], 0\n" \
+	" wsr %[tmp], scompare1\n" \
+	" " #op " %[result], %[tmp], %[i]\n" \
+	" s32c1i %[result], %[addr], 0\n" \
+	" bne %[result], %[tmp], 1b\n" \
+	: [result] "=&a" (result), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "memory" \
 	); \
 	\
@@ -184,14 +184,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
 	unsigned int vval; \
 	\
 	__asm__ __volatile__( \
-	" rsil a15, "__stringify(TOPLEVEL)"\n"\
-	" l32i %0, %2, 0\n" \
-	" " #op " %0, %0, %1\n" \
-	" s32i %0, %2, 0\n" \
+	" rsil a15, "__stringify(TOPLEVEL)"\n" \
+	" l32i %[result], %[addr], 0\n" \
+	" " #op " %[result], %[result], %[i]\n" \
+	" s32i %[result], %[addr], 0\n" \
 	" wsr a15, ps\n" \
 	" rsync\n" \
-	: "=&a" (vval) \
-	: "a" (i), "a" (v) \
+	: [result] "=&a" (vval) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "a15", "memory" \
 	); \
 } \
@@ -203,13 +203,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
 	\
 	__asm__ __volatile__( \
 	" rsil a15,"__stringify(TOPLEVEL)"\n" \
-	" l32i %0, %2, 0\n" \
-	" " #op " %0, %0, %1\n" \
-	" s32i %0, %2, 0\n" \
+	" l32i %[result], %[addr], 0\n" \
+	" " #op " %[result], %[result], %[i]\n" \
+	" s32i %[result], %[addr], 0\n" \
 	" wsr a15, ps\n" \
 	" rsync\n" \
-	: "=&a" (vval) \
-	: "a" (i), "a" (v) \
+	: [result] "=&a" (vval) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "a15", "memory" \
 	); \
 	\
@@ -223,13 +223,13 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
 	\
 	__asm__ __volatile__( \
 	" rsil a15,"__stringify(TOPLEVEL)"\n" \
-	" l32i %0, %3, 0\n" \
-	" " #op " %1, %0, %2\n" \
-	" s32i %1, %3, 0\n" \
+	" l32i %[result], %[addr], 0\n" \
+	" " #op " %[tmp], %[result], %[i]\n" \
+	" s32i %[tmp], %[addr], 0\n" \
 	" wsr a15, ps\n" \
 	" rsync\n" \
-	: "=&a" (vval), "=&a" (tmp) \
-	: "a" (i), "a" (v) \
+	: [result] "=&a" (vval), [tmp] "=&a" (tmp) \
+	: [i] "a" (i), [addr] "a" (v) \
 	: "a15", "memory" \
 	); \
 	\