Commit 17c330f9 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: Fix clock event multiplier printf format.
  sparc64: Use clock{source,events}_calc_mult_shift().
  sparc64: Use free_bootmem_late() in mdesc_lmb_free().
  sparc: Add alignment and emulation fault perf events.
  sparc64: Add syscall tracepoint support.
  sparc: Stop trying to be so fancy and use __builtin_{memcpy,memset}()
  sparc: Use __builtin_object_size() to validate the buffer size for copy_from_user()
  sparc64: Add some missing __kprobes annotations to kernel fault paths.
  sparc64: Use kprobes_built_in() to avoid ifdefs in fault_64.c
  sparc: Validate that kprobe address is 4-byte aligned.
  sparc64: Don't specify IRQF_SHARED for LDC interrupts.
  sparc64: Fix stack debugging IRQ stack regression.
  sparc64: Fix overly strict range type matching for PCI devices.
parents 48e902f0 7466bd3c
......@@ -43,6 +43,7 @@ config SPARC64
select HAVE_SYSCALL_WRAPPERS
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_SYSCALL_TRACEPOINTS
select USE_GENERIC_SMP_HELPERS if SMP
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
......
......@@ -33,4 +33,18 @@ config FRAME_POINTER
depends on MCOUNT
default y
config DEBUG_STRICT_USER_COPY_CHECKS
bool "Strict copy size checks"
depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
---help---
Enabling this option turns a certain set of sanity checks for user
copy operations into compile time failures.
The copy_from_user() etc checks are there to help test if there
are sufficient security checks on the length argument of
the copy operation, by having gcc prove that the argument is
within bounds.
If unsure, or if you run an older (pre 4.4) gcc, say N.
endmenu
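
These checks are built on gcc's __builtin_object_size(), which folds to the provable size of the destination object at compile time, or to (size_t)-1 when nothing can be proven. A minimal sketch of the pattern (illustrative only, not the kernel's exact macros; checked_copy is a made-up name):

    #include <stddef.h>

    static inline int checked_copy(void *to, const void *from, size_t n)
    {
        size_t sz = __builtin_object_size(to, 0); /* (size_t)-1 if unknown */

        if (sz != (size_t)-1 && sz < n)
            return -1; /* the kernel instead references a stub annotated
                        * __compiletime_error()/__compiletime_warning() */
        __builtin_memcpy(to, from, n);
        return 0;
    }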
......@@ -16,8 +16,6 @@
#ifdef __KERNEL__
extern void __memmove(void *,const void *,__kernel_size_t);
extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
extern __kernel_size_t __memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
......@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
})
#define __HAVE_ARCH_MEMCPY
static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
{
extern void __copy_1page(void *, const void *);
if(n <= 32) {
__builtin_memcpy(to, from, n);
} else if (((unsigned int) to & 7) != 0) {
/* Destination is not aligned on the double-word boundary */
__memcpy(to, from, n);
} else {
switch(n) {
case PAGE_SIZE:
__copy_1page(to, from);
break;
default:
__memcpy(to, from, n);
break;
}
}
return to;
}
static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
{
__memcpy(to, from, n);
return to;
}
#undef memcpy
#define memcpy(t, f, n) \
(__builtin_constant_p(n) ? \
__constant_memcpy((t),(f),(n)) : \
__nonconstant_memcpy((t),(f),(n)))
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
{
extern void bzero_1page(void *);
extern __kernel_size_t __bzero(void *, __kernel_size_t);
if(!c) {
if(count == PAGE_SIZE)
bzero_1page(s);
else
__bzero(s, count);
} else {
__memset(s, c, count);
}
return s;
}
static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
{
extern __kernel_size_t __bzero(void *, __kernel_size_t);
if(!c)
__bzero(s, count);
else
__memset(s, c, count);
return s;
}
static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
{
__memset(s, c, count);
return s;
}
#undef memset
#define memset(s, c, count) \
(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
__constant_c_and_count_memset((s), (c), (count)) : \
__constant_c_memset((s), (c), (count))) \
: __nonconstant_memset((s), (c), (count)))
#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
......
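
The hand-rolled __constant_memcpy()/__constant_c_and_count_memset() dispatch deleted here (and the similar block in string_64.h below) predates gcc handling this well on its own; __builtin_memcpy()/__builtin_memset() let the compiler expand small constant-size operations inline and fall back to the architecture's out-of-line routines otherwise. A small illustration with a hypothetical struct:

    struct pkt_hdr {
        unsigned int type;
        unsigned int len;
    };

    static void copy_hdr(struct pkt_hdr *dst, const struct pkt_hdr *src)
    {
        /* Constant 8-byte length: gcc emits a couple of loads/stores.
         * With a runtime-variable length it emits a call to memcpy(),
         * which still resolves to the sparc assembly implementation. */
        __builtin_memcpy(dst, src, sizeof(*dst));
    }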
......@@ -15,8 +15,6 @@
#include <asm/asi.h>
extern void *__memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
/* First the mem*() things. */
......@@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
extern void *__builtin_memset(void *,int,__kernel_size_t);
static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
{
extern __kernel_size_t __bzero(void *, __kernel_size_t);
if (!c) {
__bzero(s, count);
return s;
} else
return __memset(s, c, count);
}
#undef memset
#define memset(s, c, count) \
((__builtin_constant_p(count) && (count) <= 32) ? \
__builtin_memset((s), (c), (count)) : \
(__builtin_constant_p(c) ? \
__constant_memset((s), (c), (count)) : \
__memset((s), (c), (count))))
#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
......
......@@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* flag bit 8 is available */
#define TIF_SECCOMP 9 /* secure computing */
#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
......@@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1<<TIF_FREEZE)
......
......@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
return __copy_user(to, (__force void __user *) from, n);
}
extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
if (unlikely(sz != -1 && sz < n)) {
copy_from_user_overflow();
return -EFAULT;
}
if (n && __access_ok((unsigned long) from, n))
return __copy_user((__force void __user *) to, from, n);
else
......
......@@ -6,6 +6,7 @@
*/
#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
......@@ -204,6 +205,14 @@ __asm__ __volatile__( \
extern int __get_user_bad(void);
extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;
extern unsigned long __must_check ___copy_from_user(void *to,
const void __user *from,
unsigned long size);
......@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
unsigned long ret = ___copy_from_user(to, from, size);
unsigned long ret = (unsigned long) -EFAULT;
int sz = __compiletime_object_size(to);
if (likely(sz == -1 || sz >= size)) {
ret = ___copy_from_user(to, from, size);
if (unlikely(ret))
ret = copy_from_user_fixup(to, from, size);
} else {
copy_from_user_overflow();
}
return ret;
}
#define __copy_from_user copy_from_user
......
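
The effect, sketched with a hypothetical caller (buf and user_ptr are made-up names): when gcc can prove the destination smaller than the requested length, the emitted reference to copy_from_user_overflow() triggers its __compiletime_error()/__compiletime_warning() annotation as a build-time diagnostic, and at run time the same path returns -EFAULT instead of overrunning the buffer.

    char buf[16];

    /* sz folds to 16 and size is the constant 64, so the overflow
     * branch is statically taken and the annotated stub fires. */
    if (copy_from_user(buf, user_ptr, 64))
        return -EFAULT;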
......@@ -398,7 +398,7 @@
#define __NR_perf_event_open 327
#define __NR_recvmmsg 328
#define NR_SYSCALLS 329
#define NR_syscalls 329
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
......
......@@ -1294,7 +1294,7 @@ linux_sparc_syscall:
sethi %hi(PSR_SYSCALL), %l4
or %l0, %l4, %l0
/* Direct access to user regs, much faster. */
cmp %g1, NR_SYSCALLS
cmp %g1, NR_syscalls
bgeu linux_sparc_ni_syscall
sll %g1, 2, %l4
ld [%l7 + %l4], %l7
......
......@@ -4,6 +4,7 @@
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <trace/syscall.h>
#include <asm/ftrace.h>
......@@ -91,3 +92,13 @@ int __init ftrace_dyn_arch_init(void *data)
}
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned int sys_call_table[];
unsigned long __init arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
}
#endif
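
The generic CONFIG_FTRACE_SYSCALLS code calls this hook once per syscall number at boot; sparc needs its own version because sys_call_table holds 32-bit entries, as the declaration above shows. Roughly what the core does with it (a sketch, not the exact core code):

    int nr;

    for (nr = 0; nr < NR_syscalls; nr++) {
        unsigned long addr = arch_syscall_addr(nr);
        /* match addr against the compiled-in syscall metadata
         * so sys_enter/sys_exit events can be named and decoded */
    }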
......@@ -46,6 +46,9 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
if ((unsigned long) p->addr & 0x3UL)
return -EILSEQ;
p->ainsn.insn[0] = *p->addr;
flushi(&p->ainsn.insn[0]);
......
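
Sparc instructions are fixed-width 32-bit words on word boundaries, so an address with either low bit set can never be a valid probe point; rejecting it up front keeps arch_prepare_kprobe() from copying half an instruction. A hypothetical registration that now fails cleanly (target_func is a made-up symbol):

    struct kprobe kp = {
        .addr = (kprobe_opcode_t *)((unsigned long)target_func + 2),
    };
    int ret = register_kprobe(&kp); /* -EILSEQ: mid-instruction address */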
......@@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
err = request_irq(lp->cfg.rx_irq, ldc_rx,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->rx_irq_name, lp);
if (err)
return err;
err = request_irq(lp->cfg.tx_irq, ldc_tx,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
......
......@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/bootmem.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
......@@ -108,25 +109,15 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
static void mdesc_lmb_free(struct mdesc_handle *hp)
{
unsigned int alloc_size, handle_size = hp->handle_size;
unsigned long start, end;
unsigned int alloc_size;
unsigned long start;
BUG_ON(atomic_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
alloc_size = PAGE_ALIGN(handle_size);
start = (unsigned long) hp;
end = start + alloc_size;
while (start < end) {
struct page *p;
p = virt_to_page(start);
ClearPageReserved(p);
__free_page(p);
start += PAGE_SIZE;
}
alloc_size = PAGE_ALIGN(hp->handle_size);
start = __pa(hp);
free_bootmem_late(start, alloc_size);
}
static struct mdesc_mem_ops lmb_mdesc_ops = {
......
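
free_bootmem_late() releases boot memory to the page allocator after bootmem itself has been torn down, doing exactly what the deleted ClearPageReserved()/__free_page() loop open-coded; it takes a physical address, which is why __pa(hp) replaces the virtual start/end pair. For reference, its signature:

    void __init free_bootmem_late(unsigned long addr, unsigned long size);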
......@@ -104,9 +104,19 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
int i;
/* Check address type match */
if ((addr[0] ^ range[0]) & 0x03000000)
if (!((addr[0] ^ range[0]) & 0x03000000))
goto type_match;
/* Special exception, we can map a 64-bit address into
* a 32-bit range.
*/
if ((addr[0] & 0x03000000) == 0x03000000 &&
(range[0] & 0x03000000) == 0x02000000)
goto type_match;
return -EINVAL;
type_match:
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
na - 1, ns))
return -EINVAL;
......
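
The 0x03000000 bits of the first address cell are the OF PCI phys.hi space code: 0x00 config, 0x01 I/O, 0x02 32-bit MEM, 0x03 64-bit MEM. The old test rejected any mismatch outright, which was overly strict; the fix keeps the exact-match fast path and additionally lets a 64-bit MEM address map through a 32-bit MEM range. Decoding the field, as a sketch:

    unsigned int addr_space  = (addr[0]  >> 24) & 0x03; /* ss of address */
    unsigned int range_space = (range[0] >> 24) & 0x03; /* ss of range  */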
......@@ -23,6 +23,7 @@
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <trace/syscall.h>
#include <linux/compat.h>
#include <linux/elf.h>
......@@ -37,6 +38,9 @@
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
#include "entry.h"
/* #define ALLOW_INIT_TRACING */
......@@ -1059,6 +1063,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACE))
ret = tracehook_report_syscall_entry(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->u_regs[UREG_G1]);
if (unlikely(current->audit_context) && !ret)
audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
AUDIT_ARCH_SPARC :
......@@ -1084,6 +1091,9 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(result, regs->u_regs[UREG_I0]);
}
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->u_regs[UREG_G1]);
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
}
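
With CREATE_TRACE_POINTS defined before including trace/events/syscalls.h, the sys_enter/sys_exit tracepoints are instantiated in this file and fired from the slow path whenever TIF_SYSCALL_TRACEPOINT is set; on sparc the syscall number travels in %g1, hence regs->u_regs[UREG_G1]. Once this lands, the generic raw_syscalls events work as on other architectures, e.g. (assuming debugfs is mounted at the usual place):

    echo 1 > /sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/enable
    cat /sys/kernel/debug/tracing/trace_pipe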
......@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
#endif
.align 32
1: ldx [%g6 + TI_FLAGS], %l5
andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
be,pt %icc, rtrap
nop
call syscall_trace_leave
......@@ -187,7 +187,7 @@ linux_syscall_trace:
.globl linux_sparc_syscall32
linux_sparc_syscall32:
/* Direct access to user regs, much faster. */
cmp %g1, NR_SYSCALLS ! IEU1 Group
cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
srl %i0, 0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
......@@ -198,7 +198,7 @@ linux_sparc_syscall32:
srl %i5, 0, %o5 ! IEU1
srl %i2, 0, %o2 ! IEU0 Group
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
call %l7 ! CTI Group brk forced
......@@ -210,7 +210,7 @@ linux_sparc_syscall32:
.globl linux_sparc_syscall
linux_sparc_syscall:
/* Direct access to user regs, much faster. */
cmp %g1, NR_SYSCALLS ! IEU1 Group
cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
mov %i0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
......@@ -221,7 +221,7 @@ linux_sparc_syscall:
mov %i3, %o3 ! IEU1
mov %i4, %o4 ! IEU0 Group
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace ! CTI Group
mov %i0, %l5 ! IEU0
2: call %l7 ! CTI Group brk forced
......@@ -245,7 +245,7 @@ ret_sys_call:
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
80:
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
......@@ -260,7 +260,7 @@ ret_sys_call:
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
sub %g0, %o0, %o0
or %g3, %g2, %g3
stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
......
......@@ -774,26 +774,9 @@ void __devinit setup_sparc64_timer(void)
static struct clocksource clocksource_tick = {
.rating = 100,
.mask = CLOCKSOURCE_MASK(64),
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init setup_clockevent_multiplier(unsigned long hz)
{
unsigned long mult, shift = 32;
while (1) {
mult = div_sc(hz, NSEC_PER_SEC, shift);
if (mult && (mult >> 32UL) == 0UL)
break;
shift--;
}
sparc64_clockevent.shift = shift;
sparc64_clockevent.mult = mult;
}
static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
......@@ -828,9 +811,7 @@ void __init time_init(void)
clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
clocksource_tick.name = tick_ops->name;
clocksource_tick.mult =
clocksource_hz2mult(freq,
clocksource_tick.shift);
clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
clocksource_tick.read = clocksource_tick_read;
printk("clocksource: mult[%x] shift[%d]\n",
......@@ -839,15 +820,14 @@ void __init time_init(void)
clocksource_register(&clocksource_tick);
sparc64_clockevent.name = tick_ops->name;
setup_clockevent_multiplier(freq);
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
sparc64_clockevent.max_delta_ns =
clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
sparc64_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &sparc64_clockevent);
printk("clockevent: mult[%ux] shift[%d]\n",
printk("clockevent: mult[%x] shift[%d]\n",
sparc64_clockevent.mult, sparc64_clockevent.shift);
setup_sparc64_timer();
......
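
clocksource_calc_mult_shift() and clockevents_calc_mult_shift() wrap the shared clocks_calc_mult_shift() helper, which picks the largest shift for which mult still fits in 32 bits over the requested conversion range in seconds (the 4 passed above), replacing both the fixed .shift = 16 and the hand-rolled loop. The underlying fixed-point conversion, sketched:

    /* clocksource direction: cycles -> nanoseconds;
     * the clockevent direction (ns -> cycles) has the same form. */
    static inline u64 cyc2ns(u64 cycles, u32 mult, u32 shift)
    {
        return (cycles * mult) >> shift;
    }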
......@@ -17,8 +17,7 @@
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
/* #define DEBUG_MNA */
#include <linux/perf_event.h>
enum direction {
load, /* ld, ldd, ldh, ldsh */
......@@ -29,12 +28,6 @@ enum direction {
invalid,
};
#ifdef DEBUG_MNA
static char *dirstrings[] = {
"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
......@@ -255,10 +248,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
unsigned long addr = compute_effective_address(regs, insn);
int err;
#ifdef DEBUG_MNA
printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch (dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
......@@ -350,6 +340,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
}
addr = compute_effective_address(regs, insn);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch(dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
......
......@@ -20,10 +20,9 @@
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <asm/fpumacro.h>
/* #define DEBUG_MNA */
enum direction {
load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */
......@@ -33,12 +32,6 @@ enum direction {
invalid,
};
#ifdef DEBUG_MNA
static char *dirstrings[] = {
"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
......@@ -327,12 +320,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
#ifdef DEBUG_MNA
printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
"retpc[%016lx]\n",
regs->tpc, dirstrings[dir], addr, size,
regs->u_regs[UREG_RETPC]);
#endif
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
......@@ -399,6 +387,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn);
......@@ -445,6 +434,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
if (freg & 3) {
......@@ -566,6 +557,8 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
......@@ -596,6 +589,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
......@@ -657,6 +651,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
......
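
The perf_sw_event() calls feed the generic PERF_COUNT_SW_ALIGNMENT_FAULTS and PERF_COUNT_SW_EMULATION_FAULTS counters, so the kernel's fixup work becomes visible to ordinary tooling, e.g. (workload name hypothetical):

    perf stat -e alignment-faults,emulation-faults ./unaligned_workload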
......@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
......@@ -801,6 +802,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
BUG_ON(regs->tstate & TSTATE_PRIV);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
......
......@@ -44,3 +44,4 @@ obj-y += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o
obj-y += ksyms.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
obj-y += usercopy.o
......@@ -6,10 +6,6 @@
.text
.globl __memset
.type __memset, #function
__memset: /* %o0=buf, %o1=pat, %o2=len */
.globl memset
.type memset, #function
memset: /* %o0=buf, %o1=pat, %o2=len */
......@@ -83,7 +79,6 @@ __bzero_done:
retl
mov %o3, %o0
.size __bzero, .-__bzero
.size __memset, .-__memset
.size memset, .-memset
#define EX_ST(x,y) \
......
......@@ -560,7 +560,7 @@ __csum_partial_copy_end:
mov %i0, %o1
mov %i1, %o0
5:
call __memcpy
call memcpy
mov %i2, %o2
tst %o0
bne,a 2f
......
......@@ -30,7 +30,6 @@ EXPORT_SYMBOL(__memscan_generic);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__bzero);
......@@ -81,7 +80,6 @@ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
/* Special internal versions of library functions. */
EXPORT_SYMBOL(__copy_1page);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(bzero_1page);
......
......@@ -64,8 +64,9 @@ mcount:
2: sethi %hi(softirq_stack), %g3
or %g3, %lo(softirq_stack), %g3
ldx [%g3 + %g1], %g7
sub %g7, STACK_BIAS, %g7
cmp %sp, %g7
bleu,pt %xcc, 2f
bleu,pt %xcc, 3f
sethi %hi(THREAD_SIZE), %g3
add %g7, %g3, %g7
cmp %sp, %g7
......@@ -75,7 +76,7 @@ mcount:
* again, we are already trying to output the stack overflow
* message.
*/
sethi %hi(ovstack), %g7 ! can't move to panic stack fast enough
3: sethi %hi(ovstack), %g7 ! can't move to panic stack fast enough
or %g7, %lo(ovstack), %g7
add %g7, OVSTACKSIZE, %g3
sub %g3, STACK_BIAS + 192, %g3
......
......@@ -543,9 +543,6 @@ FUNC(memmove)
b 3f
add %o0, 2, %o0
#ifdef __KERNEL__
FUNC(__memcpy)
#endif
FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
sub %o0, %o1, %o4
......
......@@ -60,11 +60,10 @@
.globl __bzero_begin
__bzero_begin:
.globl __bzero, __memset,
.globl __bzero
.globl memset
.globl __memset_start, __memset_end
__memset_start:
__memset:
memset:
and %o1, 0xff, %g3
sll %g3, 8, %g2
......
#include <linux/module.h>
#include <linux/bug.h>
void copy_from_user_overflow(void)
{
WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);
......@@ -67,6 +67,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include "sfp-util_32.h"
......@@ -163,6 +164,8 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
int retcode = 0; /* assume all succeed */
unsigned long insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
#ifdef DEBUG_MATHEMU
printk("In do_mathemu()... pc is %08lx\n", regs->pc);
printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
......
......@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/perf_event.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
......@@ -183,6 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if (tstate & TSTATE_PRIV)
die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
......
......@@ -31,13 +31,12 @@
#include <asm/sections.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 0))
ret = 1;
......@@ -45,12 +44,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
return 0;
}
#endif
static void __kprobes unhandled_fault(unsigned long address,
struct task_struct *tsk,
......@@ -73,7 +66,7 @@ static void __kprobes unhandled_fault(unsigned long address,
die_if_kernel("Oops", regs);
}
static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
regs->tpc);
......@@ -170,8 +163,9 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
return insn;
}
static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
unsigned int insn, unsigned long address)
static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
int fault_code, unsigned int insn,
unsigned long address)
{
unsigned char asi = ASI_P;
......@@ -225,7 +219,7 @@ cannot_handle:
unhandled_fault (address, current, regs);
}
static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
static int times;
......@@ -237,7 +231,7 @@ static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
show_regs(regs);
}
static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
unsigned long addr)
{
static int times;
......
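
kprobes_built_in() is constant-folded, so with CONFIG_KPROBES=n the body of notify_page_fault() compiles away and the #ifdef'ed stub deleted above becomes unnecessary. The helper is roughly:

    #ifdef CONFIG_KPROBES
    static inline int kprobes_built_in(void) { return 1; }
    #else
    static inline int kprobes_built_in(void) { return 0; }
    #endif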