Commit 8fdd6c6d authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] lightweight robust futexes: x86_64

x86_64: add the futex_atomic_cmpxchg_inuser() assembly implementation, and
wire up the new syscalls.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Acked-by: Ulrich Drepper <drepper@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dfd4e3ec
@@ -688,6 +688,8 @@ ia32_sys_call_table:
 	.quad sys_ni_syscall		/* pselect6 for now */
 	.quad sys_ni_syscall		/* ppoll for now */
 	.quad sys_unshare		/* 310 */
+	.quad compat_sys_set_robust_list
+	.quad compat_sys_get_robust_list
 ia32_syscall_end:
 	.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
 		.quad ni_syscall
...
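For 32-bit tasks the two new compat entries sit directly after sys_unshare, which the table above marks as slot 310, so the ia32 syscall numbers come out as 311 (set_robust_list) and 312 (get_robust_list). A minimal sketch of exercising the compat path from a 32-bit binary; the numeric constants are derived from the table above, not taken from any header:

/* robust32.c: reach compat_sys_get_robust_list from 32-bit userland.
 * Build with: gcc -m32 -o robust32 robust32.c
 * 312 follows sys_unshare (slot 310) in the ia32 table above. */
#include <stdio.h>
#include <unistd.h>	/* syscall() */

int main(void)
{
	void *head = NULL;	/* receives a 32-bit user pointer via the compat wrapper */
	size_t len = 0;

	long ret = syscall(312, 0, &head, &len);	/* pid 0 = current task */
	printf("get_robust_list: ret=%ld head=%p len=%zu\n", ret, head, len);
	return 0;
}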
@@ -97,7 +97,28 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 static inline int
 futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
 {
-	return -ENOSYS;
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	__asm__ __volatile__(
+		"1:	" LOCK_PREFIX "cmpxchgl %3, %1		\n"
+
+		"2:	.section .fixup, \"ax\"			\n"
+		"3:	mov	%2, %0				\n"
+		"	jmp	2b				\n"
+		"	.previous				\n"
+
+		"	.section __ex_table, \"a\"		\n"
+		"	.align	8				\n"
+		"	.quad	1b,3b				\n"
+		"	.previous				\n"
+
+		: "=a" (oldval), "=m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "0" (oldval)
+		: "memory"
+	);
+
+	return oldval;
 }
 
 #endif
...
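The .fixup/__ex_table pair is what lets the kernel touch user memory safely here: the ".quad 1b,3b" entry tells the page-fault handler that a fault at label 1 should resume at label 3, which loads -EFAULT into the result register instead of oopsing. Fault handling aside, the instruction is a plain locked compare-and-swap. A C-level sketch of the semantics on an ordinary int, using GCC's __sync builtin as a stand-in for the LOCK_PREFIX cmpxchgl (the real routine operates on a __user pointer and must survive faults):

/* Atomically: if (*addr == oldval) *addr = newval;
 * in all cases return the value *addr held beforehand.
 * The robust-futex cleanup code compares the return value
 * against oldval to tell whether its store took effect. */
static int cmpxchg_semantics(int *addr, int oldval, int newval)
{
	return __sync_val_compare_and_swap(addr, oldval, newval);
}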
@@ -605,8 +605,12 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall)	/* for now */
 __SYSCALL(__NR_ppoll,	sys_ni_syscall)		/* for now */
 #define __NR_unshare		272
 __SYSCALL(__NR_unshare,	sys_unshare)
+#define __NR_set_robust_list	273
+__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
+#define __NR_get_robust_list	274
+__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
 
-#define __NR_syscall_max __NR_unshare
+#define __NR_syscall_max __NR_get_robust_list
 
 #ifndef __NO_STUBS
...
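With the numbers wired up, a 64-bit program can reach the new calls directly through syscall(2); glibc wrappers do not exist at this point. A minimal sketch, assuming the struct robust_list_head layout exported by <linux/futex.h> and using the constants 273/274 defined in the hunk above (real programs leave this registration to the threading library):

/* robust64.c: register an empty robust list, then read it back. */
#include <stdio.h>
#include <unistd.h>		/* syscall() */
#include <linux/futex.h>	/* struct robust_list_head */

int main(void)
{
	/* An empty list: the head's next pointer refers back to itself,
	 * which is how the kernel detects the end of the chain. */
	static struct robust_list_head head = {
		.list		 = { &head.list },
		.futex_offset	 = 0,
		.list_op_pending = NULL,
	};
	struct robust_list_head *cur;
	size_t len;

	if (syscall(273, &head, sizeof(head)))	/* set_robust_list */
		perror("set_robust_list");
	if (syscall(274, 0, &cur, &len))	/* get_robust_list, pid 0 = self */
		perror("get_robust_list");
	else
		printf("head=%p len=%zu\n", (void *)cur, len);
	return 0;
}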