Commit 5d96218b authored Feb 10, 2009 by Ingo Molnar
Merge branch 'x86/uaccess' into core/percpu
parents 249d51b5 18114f61
Showing 9 changed files with 488 additions and 425 deletions
arch/x86/ia32/ia32_signal.c           +195 -170
arch/x86/include/asm/paravirt.h         +0   -2
arch/x86/include/asm/spinlock.h         +2  -64
arch/x86/include/asm/thread_info.h      +1   -0
arch/x86/include/asm/uaccess.h        +130   -8
arch/x86/kernel/io_apic.c               +0  -34
arch/x86/kernel/paravirt-spinlocks.c    +0  -10
arch/x86/kernel/signal.c              +154 -137
arch/x86/mm/extable.c                   +6   -0
arch/x86/ia32/ia32_signal.c
This diff is collapsed.
arch/x86/include/asm/paravirt.h
@@ -1471,8 +1471,6 @@ u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop	((void *)_paravirt_nop)
 
-void paravirt_use_bytelocks(void);
-
 #ifdef CONFIG_SMP
 
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
arch/x86/include/asm/spinlock.h
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifdef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT
-/*
- * Define virtualization-friendly old-style lock byte lock, for use in
- * pv_lock_ops if desired.
- *
- * This differs from the pre-2.6.24 spinlock by always using xchgb
- * rather than decb to take the lock; this allows it to use a
- * zero-initialized lock structure.  It also maintains a 1-byte
- * contention counter, so that we can implement
- * __byte_spin_is_contended.
- */
-struct __byte_spinlock {
-	s8 lock;
-	s8 spinners;
-};
-
-static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->lock != 0;
-}
-
-static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->spinners != 0;
-}
-
-static inline void __byte_spin_lock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	s8 val = 1;
-
-	asm("1: xchgb %1, %0\n"
-	    "   test %1,%1\n"
-	    "   jz 3f\n"
-	    "   " LOCK_PREFIX "incb %2\n"
-	    "2: rep;nop\n"
-	    "   cmpb $1, %0\n"
-	    "   je 2b\n"
-	    "   " LOCK_PREFIX "decb %2\n"
-	    "   jmp 1b\n"
-	    "3:"
-	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
-}
-
-static inline int __byte_spin_trylock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	u8 old = 1;
-
-	asm("xchgb %1,%0"
-	    : "+m" (bl->lock), "+q" (old) : : "memory");
-
-	return old == 0;
-}
-
-static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	smp_wmb();
-	bl->lock = 0;
-}
-
-#else  /* !CONFIG_PARAVIRT */
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
@@ -267,7 +205,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif	/* CONFIG_PARAVIRT */
+#endif
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
arch/x86/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct thread_info {
 	 */
 	__u8			supervisor_stack[0];
 #endif
+	int			uaccess_err;
 };
 
 #define INIT_THREAD_INFO(tsk)			\
arch/x86/include/asm/uaccess.h
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
 
 #define __get_user_x(size, ret, x, ptr)		      \
 	asm volatile("call __get_user_" #size	      \
-		     : "=a" (ret),"=d" (x)	      \
+		     : "=a" (ret), "=d" (x)	      \
 		     : "0" (ptr))		      \
 
 /* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
 
 #define __put_user_x(size, x, ptr, __ret_pu)			\
 	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
-		     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 
 #ifdef CONFIG_X86_32
-#define __put_user_u64(x, addr, err)					\
+#define __put_user_asm_u64(x, addr, err, errret)			\
 	asm volatile("1:	movl %%eax,0(%2)\n"			\
 		     "2:	movl %%edx,4(%2)\n"			\
 		     "3:\n"						\
@@ -197,14 +197,24 @@ extern int __get_user_bad(void);
 		     _ASM_EXTABLE(1b, 4b)				\
 		     _ASM_EXTABLE(2b, 4b)				\
 		     : "=r" (err)					\
-		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+		     : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex_u64(x, addr)					\
+	asm volatile("1:	movl %%eax,0(%1)\n"			\
+		     "2:	movl %%edx,4(%1)\n"			\
+		     "3:\n"						\
+		     _ASM_EXTABLE(1b, 2b - 1b)				\
+		     _ASM_EXTABLE(2b, 3b - 2b)				\
+		     : : "A" (x), "r" (addr))
 
 #define __put_user_x8(x, ptr, __ret_pu)				\
 	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
 		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
-#define __put_user_u64(x, ptr, retval) \
+#define __put_user_asm_u64(x, ptr, retval, errret) \
-	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+	__put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
+#define __put_user_asm_ex_u64(x, addr)	\
+	__put_user_asm_ex(x, addr, "q", "", "Zr")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 #endif
@@ -276,10 +286,32 @@ do {									\
 		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
 		break;							\
 	case 4:								\
 		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
 		break;							\
 	case 8:								\
-		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
+		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
+				   errret);				\
+		break;							\
+	default:							\
+		__put_user_bad();					\
+	}								\
+} while (0)
+
+#define __put_user_size_ex(x, ptr, size)				\
+do {									\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1:								\
+		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
+		break;							\
+	case 2:								\
+		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
+		break;							\
+	case 4:								\
+		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
+		break;							\
+	case 8:								\
+		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
 		break;							\
 	default:							\
 		__put_user_bad();					\
@@ -311,9 +343,12 @@ do {									\
 
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
 	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+	 __get_user_asm_ex(x, ptr, "q", "", "=r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret)			\
@@ -350,6 +385,33 @@ do {									\
 		     : "=r" (err), ltype(x)				\
 		     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+#define __get_user_size_ex(x, ptr, size)				\
+do {									\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1:								\
+		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
+		break;							\
+	case 2:								\
+		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
+		break;							\
+	case 4:								\
+		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
+		break;							\
+	case 8:								\
+		__get_user_asm_ex_u64(x, ptr);				\
+		break;							\
+	default:							\
+		(x) = __get_user_bad();					\
+	}								\
+} while (0)
+
+#define __get_user_asm_ex(x, addr, itype, rtype, ltype)		\
+	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+		     "2:\n"						\
+		     _ASM_EXTABLE(1b, 2b - 1b)				\
+		     : ltype(x) : "m" (__m(addr)))
+
 #define __put_user_nocheck(x, ptr, size)			\
 ({								\
 	int __pu_err;						\
@@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
 		     _ASM_EXTABLE(1b, 3b)				\
 		     : "=r"(err)					\
 		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype)		\
+	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
+		     "2:\n"						\
+		     _ASM_EXTABLE(1b, 2b - 1b)				\
+		     : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try	do {						\
+	int prev_err = current_thread_info()->uaccess_err;		\
+	current_thread_info()->uaccess_err = 0;				\
+	barrier();
+
+#define uaccess_catch(err)						\
+	(err) |= current_thread_info()->uaccess_err;			\
+	current_thread_info()->uaccess_err = prev_err;			\
+} while (0)
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x:	 Variable to store result.
@@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user(x, ptr)						\
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
 /**
  * __put_user: - Write a simple value into user space, with less checking.
  * @x:	 Value to copy to user space.
@@ -434,6 +517,45 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
 
+/*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ *	get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try		uaccess_try
+#define get_user_catch(err)	uaccess_catch(err)
+
+#define get_user_ex(x, ptr)	do {					\
+	unsigned long __gue_val;					\
+	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
+	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
+} while (0)
+
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+#define put_user_try		uaccess_try
+#define put_user_catch(err)	uaccess_catch(err)
+
+#define put_user_ex(x, ptr)						\
+	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#else /* !CONFIG_X86_WP_WORKS_OK */
+
+#define put_user_try		do {		\
+	int __uaccess_err = 0;
+
+#define put_user_catch(err)			\
+	(err) |= __uaccess_err;			\
+} while (0)
+
+#define put_user_ex(x, ptr)	do {		\
+	__uaccess_err |= __put_user(x, ptr);	\
+} while (0)
+
+#endif /* CONFIG_X86_WP_WORKS_OK */
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
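The "{get|put}_user_try and catch" comment above only sketches the intended call pattern. The snippet below is a minimal usage sketch of the new helpers; the frame layout and function name are hypothetical illustrations, not part of this commit.

/* Hypothetical user-space frame layout, for illustration only. */
#include <asm/uaccess.h>

struct example_frame {
	unsigned long ax;
	unsigned long ip;
};

/*
 * Read two fields from user space inside one try/catch region.  A fault
 * in either get_user_ex() is fixed up through the exception table,
 * recorded in thread_info->uaccess_err, and reported once by
 * get_user_catch(), instead of each access carrying its own check.
 */
static int example_restore(struct example_frame __user *frame,
			   unsigned long *ax, unsigned long *ip)
{
	int err = 0;

	get_user_try {
		get_user_ex(*ax, &frame->ax);
		get_user_ex(*ip, &frame->ip);
	} get_user_catch(err);

	return err;
}

This is the pattern the signal-frame code in this commit switches to: one error result per block rather than per access.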
arch/x86/kernel/io_apic.c
@@ -3466,40 +3466,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 	return 0;
 }
 
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
-{
-	unsigned int irq;
-	int ret;
-	unsigned int irq_want;
-
-	irq_want = nr_irqs_gsi;
-	irq = create_irq_nr(irq_want);
-	if (irq == 0)
-		return -1;
-
-#ifdef CONFIG_INTR_REMAP
-	if (!intr_remapping_enabled)
-		goto no_ir;
-
-	ret = msi_alloc_irte(dev, irq, 1);
-	if (ret < 0)
-		goto error;
-no_ir:
-#endif
-	ret = setup_msi_irq(dev, msidesc, irq);
-	if (ret < 0) {
-		destroy_irq(irq);
-		return ret;
-	}
-	return 0;
-
-#ifdef CONFIG_INTR_REMAP
-error:
-	destroy_irq(irq);
-	return ret;
-#endif
-}
-
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
 	unsigned int irq;
arch/x86/kernel/paravirt-spinlocks.c
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-	pv_lock_ops.spin_lock = __byte_spin_lock;
-	pv_lock_ops.spin_trylock = __byte_spin_trylock;
-	pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
arch/x86/kernel/signal.c
This diff is collapsed.
arch/x86/mm/extable.c
@@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)
 
 	fixup = search_exception_tables(regs->ip);
 	if (fixup) {
+		/* If fixup is less than 16, it means uaccess error */
+		if (fixup->fixup < 16) {
+			current_thread_info()->uaccess_err = -EFAULT;
+			regs->ip += fixup->fixup;
+			return 1;
+		}
 		regs->ip = fixup->fixup;
 		return 1;
 	}
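As a stand-alone illustration of the rule added above: a classic exception-table entry stores the address of a recovery label, while the _ex-style entries from uaccess.h store only the length of the faulting instruction (always below 16), so recovery means stepping over it and flagging uaccess_err. The names below are a hypothetical sketch, not kernel code.

/*
 * Sketch of the two fixup styles distinguished by the hunk above.
 */
struct example_extable_entry {
	unsigned long insn;	/* address of the instruction that may fault  */
	unsigned long fixup;	/* recovery address, or a small skip distance */
};

static int example_fixup(unsigned long *ip, int *uaccess_err,
			 const struct example_extable_entry *e)
{
	if (e->fixup < 16) {
		*uaccess_err = -14;	/* i.e. -EFAULT, read back by *_user_catch() */
		*ip += e->fixup;	/* step over the faulting instruction        */
		return 1;
	}
	*ip = e->fixup;			/* classic style: jump to the handler label  */
	return 1;
}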