Commit d61fc448 authored by Pekka Paalanen, committed by Thomas Gleixner

x86: mmiotrace, preview 2

Kconfig.debug, Makefile and testmmiotrace.c style fixes.
Use a real mutex instead of a semaphore-based DECLARE_MUTEX.
Fix failure path in register probe func.
kmmio: RCU read-locked over single stepping.
Generate mapping ids.
Make mmio-mod.c built-in and rewrite its locking.
Add debugfs file to enable/disable mmiotracing.
kmmio: use irqsave spinlocks.
Lots of cleanups in mmio-mod.c.
Marker file moved from /proc into debugfs.
Call mmiotrace entrypoints directly from ioremap.c.
Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 0fd0e3da
@@ -170,22 +170,19 @@ config IOMMU_LEAK
 config MMIOTRACE_HOOKS
	bool
	default n

 config MMIOTRACE
-	tristate "Memory mapped IO tracing"
+	bool "Memory mapped IO tracing"
	depends on DEBUG_KERNEL && RELAY && DEBUG_FS
	select MMIOTRACE_HOOKS
-	default n
+	default y
	help
-	  This will build a kernel module called mmiotrace.
-	  Making this a built-in is heavily discouraged.
-
-	  Mmiotrace traces Memory Mapped I/O access and is meant for debugging
-	  and reverse engineering. The kernel module offers wrapped
-	  versions of the ioremap family of functions. The driver to be traced
-	  must be modified to call these wrappers. A user space program is
-	  required to collect the MMIO data.
+	  Mmiotrace traces Memory Mapped I/O access and is meant for
+	  debugging and reverse engineering. It is called from the ioremap
+	  implementation and works via page faults. A user space program is
+	  required to collect the MMIO data from debugfs files.
+	  Tracing is disabled by default and can be enabled from a debugfs
+	  file.

	  See http://nouveau.freedesktop.org/wiki/MmioTrace
	  If you are not helping to develop drivers, say N.
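For orientation, here is a hypothetical user-space helper matching the help text above: it writes to the debugfs control file to toggle tracing. Both the debugfs mount point and the file path are assumptions for illustration (the actual file is created by mmio-mod.c, whose diff is collapsed below); this hunk does not specify them.

/* Hypothetical: toggle mmiotrace via its debugfs control file. */
#include <stdio.h>

int main(int argc, char **argv)
{
	/* Assumed path; depends on where debugfs is mounted. */
	const char *path = "/sys/kernel/debug/mmiotrace/enabled";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs(argc > 1 ? argv[1] : "1", f);	/* "1" enables, "0" disables */
	fclose(f);
	return 0;
}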
@@ -193,7 +190,6 @@ config MMIOTRACE
 config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
-	default n
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
......
 obj-$(CONFIG_MMIOTRACE_HOOKS)	+= kmmio.o
 obj-$(CONFIG_MMIOTRACE)	+= mmiotrace.o
-mmiotrace-objs			:= pf_in.o mmio-mod.o
+mmiotrace-y			:= pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
@@ -19,6 +19,7 @@
 #include <linux/preempt.h>
 #include <linux/percpu.h>
 #include <linux/kdebug.h>
+#include <linux/mutex.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
@@ -59,7 +60,7 @@ struct kmmio_context {
 static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
						void *args);
-static DECLARE_MUTEX(kmmio_init_mutex);
+static DEFINE_MUTEX(kmmio_init_mutex);
 static DEFINE_SPINLOCK(kmmio_lock);

 /* These are protected by kmmio_lock */
@@ -90,7 +91,7 @@ static struct notifier_block nb_die = {
  */
 void reference_kmmio(void)
 {
-	down(&kmmio_init_mutex);
+	mutex_lock(&kmmio_init_mutex);
	spin_lock_irq(&kmmio_lock);
	if (!kmmio_initialized) {
		int i;
@@ -101,7 +102,7 @@ void reference_kmmio(void)
	}
	kmmio_initialized++;
	spin_unlock_irq(&kmmio_lock);
-	up(&kmmio_init_mutex);
+	mutex_unlock(&kmmio_init_mutex);
 }
 EXPORT_SYMBOL_GPL(reference_kmmio);
@@ -115,7 +116,7 @@ void unreference_kmmio(void)
 {
	bool unreg = false;

-	down(&kmmio_init_mutex);
+	mutex_lock(&kmmio_init_mutex);
	spin_lock_irq(&kmmio_lock);
	if (kmmio_initialized == 1) {
@@ -128,7 +129,7 @@ void unreference_kmmio(void)
	if (unreg)
		unregister_die_notifier(&nb_die); /* calls sync_rcu() */
-	up(&kmmio_init_mutex);
+	mutex_unlock(&kmmio_init_mutex);
 }
 EXPORT_SYMBOL(unreference_kmmio);
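The hunks above convert kmmio's init serialization from the old semaphore-style DECLARE_MUTEX to a real mutex. For orientation, the intended calling pattern for this pair, as a minimal sketch; the tracer function names are hypothetical, only reference_kmmio()/unreference_kmmio() come from this file:

/* Hypothetical tracer hooking into kmmio (sketch only). */
static void my_tracer_start(void)
{
	/* First caller registers the die notifier, under kmmio_init_mutex. */
	reference_kmmio();
	/* ... register_kmmio_probe() calls would follow ... */
}

static void my_tracer_stop(void)
{
	/* ... unregister_kmmio_probe() + synchronize_rcu() first ... */
	/* Last caller unregisters the die notifier again. */
	unreference_kmmio();
}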
@@ -244,17 +245,13 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
-	 * gets to run.
-	 *
-	 * XXX what if an interrupt occurs between returning from
-	 * do_page_fault() and entering the single-step exception handler?
-	 * And that interrupt triggers a kmmio trap?
-	 * XXX If we are tracing an interrupt service routine or whatever, is
-	 * this enough to keep it on the current cpu?
+	 * gets to run. We also hold the RCU read lock over single
+	 * stepping to avoid looking up the probe and kmmio_fault_page
+	 * again.
	 */
	preempt_disable();
+	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
@@ -287,14 +284,24 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

+	/*
+	 * Enable single-stepping and disable interrupts for the faulting
+	 * context. Local interrupts must not get enabled during stepping.
+	 */
	regs->flags |= TF_MASK;
	regs->flags &= ~IF_MASK;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage->page, NULL);

+	/*
+	 * If another cpu accesses the same page while we are stepping,
+	 * the access will not be caught. It will simply succeed and the
+	 * only downside is we lose the event. If this becomes a problem,
+	 * the user should drop to single cpu before tracing.
+	 */
+
	put_cpu_var(kmmio_ctx);
-	rcu_read_unlock();
	return 1;

 no_kmmio_ctx:
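Why the read-side critical section must now cover both exceptions: unregister_kmmio_probe() (further down) unpublishes entries with list_del_rcu() and defers the actual freeing past an RCU grace period, so anything looked up under rcu_read_lock() in the fault handler stays valid until the debug-trap handler drops the lock. Condensed into one place, as a sketch rather than literal patch code:

/* Reader side: spans the page fault and the subsequent debug trap. */
preempt_disable();
rcu_read_lock();			/* taken in kmmio_handler() */
/* ... disarm the page, single-step the faulting instruction ... */
rcu_read_unlock();			/* dropped in post_kmmio_handler() */
preempt_enable_no_resched();

/* Updater side: unpublish first, free only after a grace period,
 * so the reader above never sees a freed kmmio_fault_page. */
list_del_rcu(&p->list);
/* freeing of fault pages is deferred via the RCU callback
 * remove_kmmio_fault_pages() */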
@@ -313,32 +320,15 @@ no_kmmio:
 static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 {
	int ret = 0;
-	struct kmmio_probe *probe;
-	struct kmmio_fault_page *faultpage;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active)
		goto out;

-	rcu_read_lock();
-
-	faultpage = get_kmmio_fault_page(ctx->addr);
-	probe = get_kmmio_probe(ctx->addr);
-	if (faultpage != ctx->fpage || probe != ctx->probe) {
-		/*
-		 * The trace setup changed after kmmio_handler() and before
-		 * running this respective post handler. User does not want
-		 * the result anymore.
-		 */
-		ctx->probe = NULL;
-		ctx->fpage = NULL;
-	}
-
	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

-	if (ctx->fpage)
-		arm_kmmio_fault_page(ctx->fpage->page, NULL);
+	arm_kmmio_fault_page(ctx->fpage->page, NULL);

	regs->flags &= ~TF_MASK;
	regs->flags |= ctx->saved_flags;
@@ -346,6 +336,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
+	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
@@ -355,8 +346,6 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
	 */
	if (!(regs->flags & TF_MASK))
		ret = 1;
-
-	rcu_read_unlock();
 out:
	put_cpu_var(kmmio_ctx);
	return ret;
@@ -411,15 +400,16 @@ static void release_kmmio_fault_page(unsigned long page,
 int register_kmmio_probe(struct kmmio_probe *p)
 {
+	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;

-	spin_lock_irq(&kmmio_lock);
-	kmmio_count++;
+	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
+	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < p->len) {
		if (add_kmmio_fault_page(p->addr + size))
@@ -427,7 +417,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
		size += PAGE_SIZE;
	}
 out:
-	spin_unlock_irq(&kmmio_lock);
+	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
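The irq-save conversion is the point of these two hunks: spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if a caller already holds them disabled, while the irqsave/irqrestore pair preserves the caller's IRQ state. As a generic illustration (the lock and function names here are made up):

/* Illustrative only: why irqsave is the safe variant here. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void safe_in_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section, IRQs off ... */
	spin_unlock_irqrestore(&example_lock, flags);
	/* IRQ state is now exactly what the caller had before. */
}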
@@ -478,7 +468,8 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
 /*
  * Remove a kmmio probe. You have to synchronize_rcu() before you can be
- * sure that the callbacks will not be called anymore.
+ * sure that the callbacks will not be called anymore. Only after that
+ * you may actually release your struct kmmio_probe.
  *
  * Unregistering a kmmio fault page has three steps:
  * 1. release_kmmio_fault_page()
@@ -490,18 +481,19 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
  */
 void unregister_kmmio_probe(struct kmmio_probe *p)
 {
+	unsigned long flags;
	unsigned long size = 0;
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

-	spin_lock_irq(&kmmio_lock);
+	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < p->len) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
-	spin_unlock_irq(&kmmio_lock);
+	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
......
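Tying this together, the caller-side teardown that the comment above unregister_kmmio_probe() prescribes would look like the sketch below; the wrapper function is hypothetical, only the three calls are mandated by this file:

#include <linux/rcupdate.h>
#include <linux/slab.h>

static void my_remove_probe(struct kmmio_probe *p)
{
	unregister_kmmio_probe(p);
	synchronize_rcu();	/* wait until no CPU can still run the handlers */
	kfree(p);		/* only now may struct kmmio_probe be released */
}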
@@ -4,10 +4,6 @@
 #include <linux/module.h>
 #include <asm/io.h>

-extern void __iomem *ioremap_nocache_trace(unsigned long offset,
-						unsigned long size);
-extern void iounmap_trace(volatile void __iomem *addr);
-
 #define MODULE_NAME "testmmiotrace"

 static unsigned long mmio_address;
@@ -28,25 +24,24 @@ static void do_write_test(void __iomem *p)
 static void do_read_test(void __iomem *p)
 {
	unsigned int i;
-	volatile unsigned int v;
	for (i = 0; i < 256; i++)
-		v = ioread8(p + i);
+		ioread8(p + i);
	for (i = 1024; i < (5 * 1024); i += 2)
-		v = ioread16(p + i);
+		ioread16(p + i);
	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		v = ioread32(p + i);
+		ioread32(p + i);
 }

 static void do_test(void)
 {
-	void __iomem *p = ioremap_nocache_trace(mmio_address, 0x4000);
+	void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
	if (!p) {
		pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
		return;
	}
	do_write_test(p);
	do_read_test(p);
-	iounmap_trace(p);
+	iounmap(p);
 }

 static int __init init(void)
......
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/mmiotrace.h>

 #include <asm/cacheflush.h>
 #include <asm/e820.h>
@@ -126,6 +127,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
+	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
@@ -233,7 +235,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		return NULL;
	}

-	return (void __iomem *) (vaddr + offset);
+	ret_addr = (void __iomem *) (vaddr + offset);
+	mmiotrace_ioremap(phys_addr, size, ret_addr);
+
+	return ret_addr;
 }

 /**
@@ -325,6 +330,8 @@ void iounmap(volatile void __iomem *addr)
	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

+	mmiotrace_iounmap(addr);
+
	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
......
@@ -16,11 +16,12 @@ typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
				unsigned long condition, struct pt_regs *);

 struct kmmio_probe {
-	struct list_head list;
+	struct list_head list; /* kmmio internal list */
	unsigned long addr; /* start location of the probe point */
	unsigned long len; /* length of the probe region */
	kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
	kmmio_post_handler_t post_handler; /* Called after addr is executed */
+	void *user_data;
 };

 /* kmmio is active by some kmmio_probes? */
@@ -38,6 +39,21 @@ extern void unregister_kmmio_probe(struct kmmio_probe *p);
 /* Called from page fault handler. */
 extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);

+/* Called from ioremap.c */
+#ifdef CONFIG_MMIOTRACE
+extern void
+mmiotrace_ioremap(unsigned long offset, unsigned long size, void __iomem *addr);
+extern void mmiotrace_iounmap(volatile void __iomem *addr);
+#else
+static inline void
+mmiotrace_ioremap(unsigned long offset, unsigned long size, void __iomem *addr)
+{
+}
+static inline void mmiotrace_iounmap(volatile void __iomem *addr)
+{
+}
+#endif /* CONFIG_MMIOTRACE */
+
 #endif /* __KERNEL__ */
......
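Putting the header to use: a minimal sketch of a client filling in struct kmmio_probe as declared above. The handler names and bodies are hypothetical; the field names, handler signatures, and registration calls come from this header:

#include <linux/mmiotrace.h>

static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
			unsigned long addr)
{
	/* called before the faulting MMIO access is single-stepped */
}

static void my_post(struct kmmio_probe *p, unsigned long condition,
			struct pt_regs *regs)
{
	/* called after the access has executed */
}

static struct kmmio_probe my_probe = {
	.addr = 0,		/* fill in: start of the traced region */
	.len = PAGE_SIZE,	/* length of the probe region */
	.pre_handler = my_pre,
	.post_handler = my_post,
	.user_data = NULL,	/* new in this patch: client-private cookie */
};

/*
 * register_kmmio_probe(&my_probe) arms the pages;
 * unregister_kmmio_probe(&my_probe) plus synchronize_rcu() tears
 * everything down, as documented in kmmio.c.
 */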