Commit 59c288ff authored by Linus Torvalds

Merge branches 'frv' and 'mn10300'

* frv:
  FRV: Implement new-style ptrace
  FRV: Don't turn on TIF_SYSCALL_TRACE unconditionally in syscall prologue
  FRV: Implement TIF_NOTIFY_RESUME
  FRV: Remove in-kernel strace code
  FRV: BUG to BUG_ON changes
  FRV: bitops: Change the bitmap index from int to unsigned long

* mn10300:
  MN10300: Add utrace/tracehooks support
  MN10300: Don't set the dirty bit in the DTLB entries in the TLB-miss handler
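
Both branches wire the new TIF_NOTIFY_RESUME flag into their return-to-userspace paths. The shared shape of that change, distilled from the signal.c hunks below, looks roughly like this (a sketch only: handle_notify_resume() is a hypothetical stand-in for each arch's do_notify_resume(), and "regs" stands in for the arch frame pointer, __frame on FRV and MN10300):

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/tracehook.h>
#include <linux/types.h>

static void do_signal(struct pt_regs *regs);	/* arch-private, as in the hunks below */

/* Hypothetical per-arch return-to-userspace hook; mirrors the hunks below. */
static void handle_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
{
	/* pending signal delivery still comes first */
	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
		do_signal(regs);

	/* then run any callbacks queued for the resumption of userspace */
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
}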
@@ -6,6 +6,7 @@ config FRV
 	bool
 	default y
 	select HAVE_IDE
+	select HAVE_ARCH_TRACEHOOK
 
 config ZONE_DMA
 	bool
@@ -112,7 +112,7 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig
 #define atomic_clear_mask(mask, v)	atomic_test_and_ANDNOT_mask((mask), (v))
 #define atomic_set_mask(mask, v)	atomic_test_and_OR_mask((mask), (v))
 
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *ptr = addr;
 	unsigned long mask = 1UL << (nr & 31);
@@ -120,7 +120,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 	return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
 }
 
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *ptr = addr;
 	unsigned long mask = 1UL << (nr & 31);
@@ -128,7 +128,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 	return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
 }
 
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *ptr = addr;
 	unsigned long mask = 1UL << (nr & 31);
@@ -136,22 +136,22 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
 }
 
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(unsigned long nr, volatile void *addr)
 {
 	test_and_clear_bit(nr, addr);
 }
 
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(unsigned long nr, volatile void *addr)
 {
 	test_and_set_bit(nr, addr);
 }
 
-static inline void change_bit(int nr, volatile void * addr)
+static inline void change_bit(unsigned long nr, volatile void *addr)
 {
 	test_and_change_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile void * addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask;
@@ -161,7 +161,7 @@ static inline void __clear_bit(int nr, volatile void * addr)
 	*a &= ~mask;
 }
 
-static inline void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask;
@@ -171,7 +171,7 @@ static inline void __set_bit(int nr, volatile void * addr)
 	*a |= mask;
 }
 
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask;
@@ -181,7 +181,7 @@ static inline void __change_bit(int nr, volatile void *addr)
 	*a ^= mask;
 }
 
-static inline int __test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -193,7 +193,7 @@ static inline int __test_and_clear_bit(int nr, volatile void * addr)
 	return retval;
 }
 
-static inline int __test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -205,7 +205,7 @@ static inline int __test_and_set_bit(int nr, volatile void * addr)
 	return retval;
 }
 
-static inline int __test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 	volatile unsigned long *a = addr;
 	int mask, retval;
@@ -220,12 +220,13 @@ static inline int __test_and_change_bit(int nr, volatile void * addr)
 /*
  * This routine doesn't need to be atomic.
  */
-static inline int __constant_test_bit(int nr, const volatile void * addr)
+static inline int
+__constant_test_bit(unsigned long nr, const volatile void *addr)
 {
 	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
 }
 
-static inline int __test_bit(int nr, const volatile void * addr)
+static inline int __test_bit(unsigned long nr, const volatile void *addr)
 {
 	int * a = (int *) addr;
 	int mask;
@@ -116,6 +116,7 @@ do { \
 } while(0)
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 #define ELF_FDPIC_CORE_EFLAGS	EF_FRV_FDPIC
 #define ELF_EXEC_PAGESIZE	16384
 
@@ -87,8 +87,7 @@ static inline void pci_dma_sync_single(struct pci_dev *hwdev,
 					dma_addr_t dma_handle,
 					size_t size, int direction)
 {
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 
 	frv_cache_wback_inv((unsigned long)bus_to_virt(dma_handle),
 			    (unsigned long)bus_to_virt(dma_handle) + size);
@@ -105,9 +104,7 @@ static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
 				    int nelems, int direction)
 {
 	int i;
-
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	BUG_ON(direction == PCI_DMA_NONE);
 
 	for (i = 0; i < nelems; i++)
 		frv_cache_wback_inv(sg_dma_address(&sg[i]),
@@ -65,6 +65,8 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
+struct task_struct;
+
 /*
  * we dedicate GR28 to keeping a pointer to the current exception frame
  * - gr28 is destroyed on entry to the kernel from userspace
@@ -73,11 +75,18 @@ register struct pt_regs *__frame asm("gr28");
 
 #define user_mode(regs)			(!((regs)->psr & PSR_S))
 #define instruction_pointer(regs)	((regs)->pc)
+#define user_stack_pointer(regs)	((regs)->sp)
 
 extern unsigned long user_stack(const struct pt_regs *);
 extern void show_regs(struct pt_regs *);
 #define profile_pc(regs) ((regs)->pc)
-#endif
+
+#define task_pt_regs(task) ((task)->thread.frame0)
+
+#define arch_has_single_step()	(1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
 
 #endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
 #endif /* _ASM_PTRACE_H */
/* syscall parameter access functions
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_SYSCALL_H
#define _ASM_SYSCALL_H
#include <linux/err.h>
#include <asm/ptrace.h>
/*
* Get the system call number or -1
*/
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return regs->syscallno;
}
/*
* Restore the clobbered GR8 register
* (1st syscall arg was overwritten with syscall return or error)
*/
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
regs->gr8 = regs->orig_gr8;
}
/*
* See if the syscall return value is an error, returning it if it is and 0 if
* not
*/
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
return IS_ERR_VALUE(regs->gr8) ? regs->gr8 : 0;
}
/*
* Get the syscall return value
*/
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
return regs->gr8;
}
/*
* Set the syscall return value
*/
static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
if (error)
regs->gr8 = -error;
else
regs->gr8 = val;
}
/*
* Retrieve the system call arguments
*/
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args)
{
/*
* Do this simply for now. If we need to start supporting
* fetching arguments from arbitrary indices, this will need some
* extra logic. Presently there are no in-tree users that depend
* on this behaviour.
*/
BUG_ON(i);
/* Argument pattern is: GR8, GR9, GR10, GR11, GR12, GR13 */
switch (n) {
case 6: args[5] = regs->gr13;
case 5: args[4] = regs->gr12;
case 4: args[3] = regs->gr11;
case 3: args[2] = regs->gr10;
case 2: args[1] = regs->gr9;
case 1: args[0] = regs->gr8;
break;
default:
BUG();
}
}
/*
* Alter the system call arguments
*/
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args)
{
/* Same note as above applies */
BUG_ON(i);
switch (n) {
case 6: regs->gr13 = args[5];
case 5: regs->gr12 = args[4];
case 4: regs->gr11 = args[3];
case 3: regs->gr10 = args[2];
case 2: regs->gr9 = args[1];
case 1: regs->gr8 = args[0];
break;
default:
BUG();
}
}
#endif /* _ASM_SYSCALL_H */
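
The file above fills in the generic <asm/syscall.h> accessor interface for FRV; arch-independent code (tracers and the like) is what actually calls these helpers. A hypothetical consumer, not part of this diff, might use them like so:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/*
 * Hypothetical example (not in this commit): dump the syscall number and
 * all six possible arguments of a task stopped at syscall entry, using the
 * accessors defined above.
 */
static void dump_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	if (nr == -1)	/* task is not inside a system call */
		return;

	/* i = 0, n = 6: fetch every argument (GR8..GR13 on FRV) */
	syscall_get_arguments(task, regs, 0, 6, args);

	printk(KERN_DEBUG "syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx)\n",
	       nr, args[0], args[1], args[2], args[3], args[4], args[5]);
}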
@@ -109,20 +109,20 @@ register struct thread_info *__current_thread_info asm("gr15");
  * - other flags in MSW
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
-#define TIF_SIGPENDING		1	/* signal pending */
-#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
-#define TIF_SINGLESTEP		3	/* restore singlestep on return to user mode */
-#define TIF_IRET		4	/* return with iret */
+#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SINGLESTEP		4	/* restore singlestep on return to user mode */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* OOM killer killed process */
 #define TIF_FREEZE		18	/* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
-#define _TIF_IRET		(1 << TIF_IRET)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
@@ -886,7 +886,6 @@ system_call:
 	bnc		icc0,#0,__syscall_badsys
 
 	ldi		@(gr15,#TI_FLAGS),gr4
-	ori		gr4,#_TIF_SYSCALL_TRACE,gr4
 	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
 	bne		icc0,#0,__syscall_trace_entry
 
@@ -1150,11 +1149,10 @@ __entry_work_notifysig:
 
 	# perform syscall entry tracing
 __syscall_trace_entry:
 	LEDS		0x6320
-	setlos.p	#0,gr8
-	call		do_syscall_trace
+	call		syscall_trace_entry
 
-	ldi		@(gr28,#REG_SYSCALLNO),gr7
-	lddi		@(gr28,#REG_GR(8)) ,gr8
+	lddi.p		@(gr28,#REG_GR(8)) ,gr8
+	ori		gr8,#0,gr7		; syscall_trace_entry() returned new syscallno
 	lddi		@(gr28,#REG_GR(10)),gr10
 	lddi.p		@(gr28,#REG_GR(12)),gr12
@@ -1169,11 +1167,10 @@ __syscall_exit_work:
 	beq		icc0,#1,__entry_work_pending
 
 	movsg		psr,gr23
-	andi		gr23,#~PSR_PIL,gr23	; could let do_syscall_trace() call schedule()
+	andi		gr23,#~PSR_PIL,gr23	; could let syscall_trace_exit() call schedule()
 	movgs		gr23,psr
 
-	setlos.p	#1,gr8
-	call		do_syscall_trace
+	call		syscall_trace_exit
 	bra		__entry_resume_userspace
 
 __syscall_badsys:
@@ -21,6 +21,7 @@
 #include <linux/unistd.h>
 #include <linux/personality.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -516,6 +517,9 @@ static void do_signal(void)
 		 * clear the TIF_RESTORE_SIGMASK flag */
 		if (test_thread_flag(TIF_RESTORE_SIGMASK))
 			clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+		tracehook_signal_handler(signr, &info, &ka, __frame,
+					 test_thread_flag(TIF_SINGLESTEP));
 	}
 
 	return;
@@ -564,4 +568,10 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal();
 
+	/* deal with notification on about to resume userspace execution */
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(__frame);
+	}
+
 } /* end do_notify_resume() */
@@ -23,8 +23,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 	char *p, ch;
 	long err = -EFAULT;
 
-	if (count < 0)
-		BUG();
+	BUG_ON(count < 0);
 
 	p = dst;
 
@@ -76,8 +75,7 @@ long strnlen_user(const char __user *src, long count)
 	long err = 0;
 	char ch;
 
-	if (count < 0)
-		BUG();
+	BUG_ON(count < 0);
 
 #ifndef CONFIG_MMU
 	if ((unsigned long) src < memory_start)
@@ -116,8 +116,7 @@ EXPORT_SYMBOL(dma_free_coherent);
 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 			  enum dma_data_direction direction)
 {
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 
 	frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
 
@@ -151,8 +150,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		frv_cache_wback_inv(sg_dma_address(&sg[i]),
 				    sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]));
 
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 
 	return nents;
 }
@@ -48,8 +48,7 @@ EXPORT_SYMBOL(dma_free_coherent);
 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 			  enum dma_data_direction direction)
 {
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 
 	frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
 
@@ -81,8 +80,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	void *vaddr;
 	int i;
 
-	if (direction == DMA_NONE)
-		BUG();
+	BUG_ON(direction == DMA_NONE);
 
 	dampr2 = __get_DAMPR(2);
 
@@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration"
 config MN10300
 	def_bool y
 	select HAVE_OPROFILE
+	select HAVE_ARCH_TRACEHOOK
 
 config AM33
 	def_bool y
@@ -34,7 +34,7 @@
  */
 typedef unsigned long elf_greg_t;
 
-#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 1)
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 #define ELF_NFPREG 32
@@ -76,6 +76,7 @@ do { \
 } while (0)
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
 /*
@@ -143,13 +143,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define task_pt_regs(task)						\
-({									\
-	struct pt_regs *__regs__;					\
-	__regs__ = (struct pt_regs *) (KSTK_TOP(task_stack_page(task)) - 8); \
-	__regs__ - 1;							\
-})
-
+#define task_pt_regs(task) ((task)->thread.uregs)
 #define KSTK_EIP(task) (task_pt_regs(task)->pc)
 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
 
@@ -91,9 +91,17 @@ extern struct pt_regs *__frame; /* current frame pointer */
 #if defined(__KERNEL__)
 
 #if !defined(__ASSEMBLY__)
+struct task_struct;
+
 #define user_mode(regs)			(((regs)->epsw & EPSW_nSL) == EPSW_nSL)
 #define instruction_pointer(regs)	((regs)->pc)
+#define user_stack_pointer(regs)	((regs)->sp)
 extern void show_regs(struct pt_regs *);
+
+#define arch_has_single_step()	(1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
+
 #endif /* !__ASSEMBLY */
 
 #define profile_pc(regs) ((regs)->pc)
@@ -76,7 +76,7 @@ ENTRY(system_call)
 	cmp	nr_syscalls,d0
 	bcc	syscall_badsys
 	btst	_TIF_SYSCALL_TRACE,(TI_flags,a2)
-	bne	syscall_trace_entry
+	bne	syscall_entry_trace
 syscall_call:
 	add	d0,d0,a1
 	add	a1,a1
@@ -104,11 +104,10 @@ restore_all:
 syscall_exit_work:
 	btst	_TIF_SYSCALL_TRACE,d2
 	beq	work_pending
-	__sti				# could let do_syscall_trace() call
+	__sti				# could let syscall_trace_exit() call
 					# schedule() instead
 	mov	fp,d0
-	mov	1,d1
-	call	do_syscall_trace[],0	# do_syscall_trace(regs,entryexit)
+	call	syscall_trace_exit[],0	# do_syscall_trace(regs)
 	jmp	resume_userspace
 
 	ALIGN
@@ -138,13 +137,11 @@ work_notifysig:
 	jmp	resume_userspace
 
 	# perform syscall entry tracing
-syscall_trace_entry:
+syscall_entry_trace:
 	mov	-ENOSYS,d0
 	mov	d0,(REG_D0,fp)
 	mov	fp,d0
-	clr	d1
-	call	do_syscall_trace[],0
-	mov	(REG_ORIG_D0,fp),d0
+	call	syscall_trace_entry[],0	# returns the syscall number to actually use
 	mov	(REG_D1,fp),d1
 	cmp	nr_syscalls,d0
 	bcs	syscall_call
@@ -23,6 +23,7 @@
 #include <linux/tty.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
+#include <linux/tracehook.h>
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
@@ -511,6 +512,9 @@ static void do_signal(struct pt_regs *regs)
 		 * clear the TIF_RESTORE_SIGMASK flag */
 		if (test_thread_flag(TIF_RESTORE_SIGMASK))
 			clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+		tracehook_signal_handler(signr, &info, &ka, regs,
+					 test_thread_flag(TIF_SINGLESTEP));
 	}
 
 	return;
@@ -561,4 +565,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 	/* deal with pending signal delivery */
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
 		do_signal(regs);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(__frame);
+	}
 }
@@ -165,24 +165,6 @@ ENTRY(itlb_aerror)
 
 ENTRY(dtlb_aerror)
 	and	~EPSW_NMID,epsw
 	add	-4,sp
-	mov	d1,(sp)
-
-	movhu	(MMUFCR_DFC),d1			# is it the initial valid write
-						# to this page?
-	and	MMUFCR_xFC_INITWR,d1
-	beq	dtlb_pagefault			# jump if not
-
-	mov	(DPTEL),d1			# set the dirty bit
-						# (don't replace with BSET!)
-	or	_PAGE_DIRTY,d1
-	mov	d1,(DPTEL)
-	mov	(sp),d1
-	add	4,sp
-	rti
-
-	ALIGN
-dtlb_pagefault:
-	mov	(sp),d1
 	SAVE_ALL
 	add	-4,sp				# need to pass three params