Commit 6ee4752f authored by David Chinner, committed by Lachlan McIlroy

[XFS] Use atomic counters for ktrace buffer indexes

ktrace_enter() is consuming vast amounts of CPU time due to the use of a
single global lock for protecting buffer index increments. Change it to
use per-buffer atomic counters - this reduces ktrace_enter() overhead
during a trace intensive test on a 4p machine from 58% of all CPU time to
12% and halves test runtime.
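
The pattern is easy to demonstrate outside the kernel. Below is a minimal
userspace sketch of the same lock-free slot claim, written with C11
<stdatomic.h> rather than the kernel's atomic_t API; the structure, names,
and buffer size are illustrative stand-ins, not the real XFS ktrace types.

/*
 * Minimal userspace sketch of the lock-free slot claim (C11 atomics,
 * not the kernel atomic_t API); types and sizes are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NENTRIES	8		/* illustrative trace-buffer size */

struct trace_buf {
	atomic_int	index;		/* per-buffer counter, no global lock */
	int		rollover;
	int		entries[NENTRIES];
};

static void trace_enter(struct trace_buf *tb, int val)
{
	/*
	 * atomic_fetch_add() returns the pre-increment value, so the
	 * claimed slot is simply old % NENTRIES; the patch's
	 * atomic_add_return(1, ...) returns the post-increment value,
	 * hence its extra "index - 1".
	 */
	int index = atomic_fetch_add(&tb->index, 1) % NENTRIES;

	if (!tb->rollover && index == NENTRIES - 1)
		tb->rollover = 1;
	tb->entries[index] = val;
}

int main(void)
{
	struct trace_buf tb = { .rollover = 0 };
	int i;

	atomic_init(&tb.index, 0);
	for (i = 0; i < 12; i++)	/* 12 > NENTRIES, so the index wraps */
		trace_enter(&tb, i);
	for (i = 0; i < NENTRIES; i++)
		printf("slot %d = %d\n", i, tb.entries[i]);
	return 0;
}

Each caller receives a distinct counter value from the atomic add, so
concurrent slot claims never collide and no lock is needed; under contention
entries may complete slightly out of order, an acceptable trade-off for a
trace buffer.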

SGI-PV: 977546
SGI-Modid: xfs-linux-melb:xfs-kern:30537a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
parent 44d814ce
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -92,7 +92,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 
 	ktp->kt_entries = ktep;
 	ktp->kt_nentries = nentries;
-	ktp->kt_index = 0;
+	atomic_set(&ktp->kt_index, 0);
 	ktp->kt_rollover = 0;
 	return ktp;
 }
@@ -151,8 +151,6 @@ ktrace_enter(
 	void		*val14,
 	void		*val15)
 {
-	static DEFINE_SPINLOCK(wrap_lock);
-	unsigned long	flags;
 	int		index;
 	ktrace_entry_t	*ktep;
@@ -161,12 +159,8 @@ ktrace_enter(
 	/*
	 * Grab an entry by pushing the index up to the next one.
	 */
-	spin_lock_irqsave(&wrap_lock, flags);
-	index = ktp->kt_index;
-	if (++ktp->kt_index == ktp->kt_nentries)
-		ktp->kt_index = 0;
-	spin_unlock_irqrestore(&wrap_lock, flags);
+	index = atomic_add_return(1, &ktp->kt_index);
+	index = (index - 1) % ktp->kt_nentries;
 
 	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
 		ktp->kt_rollover = 1;
@@ -199,11 +193,12 @@ int
 ktrace_nentries(
 	ktrace_t	*ktp)
 {
-	if (ktp == NULL) {
+	int	index;
+	if (ktp == NULL)
 		return 0;
-	}
 
-	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
+	index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+	return (ktp->kt_rollover ? ktp->kt_nentries : index);
 }
 
 /*
@@ -228,7 +223,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
 	int		nentries;
 
 	if (ktp->kt_rollover)
-		index = ktp->kt_index;
+		index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
 	else
 		index = 0;
--- a/fs/xfs/support/ktrace.h
+++ b/fs/xfs/support/ktrace.h
@@ -30,7 +30,7 @@ typedef struct ktrace_entry {
  */
 typedef struct ktrace {
 	int		kt_nentries;	/* number of entries in trace buf */
-	int		kt_index;	/* current index in entries */
+	atomic_t	kt_index;	/* current index in entries */
 	int		kt_rollover;
 	ktrace_entry_t	*kt_entries;	/* buffer of entries */
 } ktrace_t;
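
As a quick sanity check of the new wrap arithmetic (the buffer size and
counter values below are hypothetical, chosen only to trace the math):
with a buffer of 4 entries, atomic_add_return(1, &kt_index) hands out
1, 2, 3, 4, 5, ... across successive calls, and (index - 1) % 4 maps those
to slots 0, 1, 2, 3, 0, ... After nine writes the free-running counter
reads 9, so readers such as ktrace_first() compute 9 % 4 = 1, the next
write position and therefore the oldest surviving entry.

/* Hypothetical worked check of the wrap arithmetic; not XFS code. */
#include <assert.h>

int main(void)
{
	int kt_nentries = 4;			/* made-up buffer size */
	int expected[] = { 0, 1, 2, 3, 0 };	/* slots for counter 1..5 */
	int counter;

	/* atomic_add_return() yields 1, 2, 3, ...; (n - 1) % size wraps */
	for (counter = 1; counter <= 5; counter++)
		assert((counter - 1) % kt_nentries == expected[counter - 1]);

	/* after 9 writes the counter reads 9; readers reduce it mod size,
	 * giving slot 1: the next write position, i.e. the oldest entry */
	assert(9 % kt_nentries == 1);
	return 0;
}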