Commit 87654a70 authored Jul 25, 2009 by Thomas Gleixner
trace: Convert various locks to atomic_spinlock
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent fea886ed
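The whole commit follows one mechanical pattern: each lock that must remain a real, spinning lock is switched from spinlock_t to atomic_spinlock_t, and every lock/unlock call gains an atomic_ prefix while keeping the same arguments and irq-saving semantics. Below is a minimal before/after sketch of that pattern. It is illustrative only: example_lock, example_alock and the surrounding functions are hypothetical, and the atomic_spinlock API (DEFINE_ATOMIC_SPINLOCK, atomic_spin_lock_irqsave, ...) is assumed to be the drop-in variant used throughout this diff, presumably from the preempt-rt patch queue.

/* Hypothetical illustration of the conversion pattern; not part of the
 * commit itself. Assumes the atomic_spinlock API is a drop-in
 * replacement for the plain spinlock calls, as this diff suggests. */

/* Before: an ordinary kernel spinlock. */
static DEFINE_SPINLOCK(example_lock);

static void example_before(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* After: same structure and same arguments; only the lock type and the
 * function names change, so the conversion is a rename with no logic
 * changes. */
static DEFINE_ATOMIC_SPINLOCK(example_alock);

static void example_after(void)
{
	unsigned long flags;

	atomic_spin_lock_irqsave(&example_alock, flags);
	/* ... critical section ... */
	atomic_spin_unlock_irqrestore(&example_alock, flags);
}

The presumed motivation is that on a kernel where ordinary spinlocks may become sleeping locks, the atomic_ variants keep genuine spinning behaviour for tracing paths that cannot sleep.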
Showing 3 changed files with 30 additions and 30 deletions

kernel/trace/ring_buffer.c    +22 -22
kernel/trace/trace.c           +5  -5
kernel/trace/trace_irqsoff.c   +3  -3
kernel/trace/ring_buffer.c
@@ -403,7 +403,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 struct ring_buffer_per_cpu {
 	int			cpu;
 	struct ring_buffer	*buffer;
-	spinlock_t		reader_lock;	/* serialize readers */
+	atomic_spinlock_t	reader_lock;	/* serialize readers */
 	raw_spinlock_t		lock;
 	struct lock_class_key	lock_key;
 	struct list_head	pages;
@@ -570,7 +570,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->cpu = cpu;
 	cpu_buffer->buffer = buffer;
-	spin_lock_init(&cpu_buffer->reader_lock);
+	atomic_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	INIT_LIST_HEAD(&cpu_buffer->pages);
@@ -2117,9 +2117,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	cpu_buffer = iter->cpu_buffer;

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_iter_reset(iter);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
@@ -2517,10 +2517,10 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
  again:
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		atomic_spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		atomic_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);

 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
@@ -2547,9 +2547,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;

  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2587,7 +2587,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		atomic_spin_lock(&cpu_buffer->reader_lock);

 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
@@ -2597,7 +2597,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
  out_unlock:
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		atomic_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);

  out:
@@ -2645,11 +2645,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	__raw_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
 	__raw_spin_unlock(&cpu_buffer->lock);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	return iter;
 }
@@ -2687,14 +2687,14 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	unsigned long flags;

  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
 	if (!event)
 		goto out;

 	rb_advance_iter(iter);
  out:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2762,7 +2762,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	atomic_inc(&cpu_buffer->record_disabled);

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

 	__raw_spin_lock(&cpu_buffer->lock);
@@ -2770,7 +2770,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	__raw_spin_unlock(&cpu_buffer->lock);

-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 	atomic_dec(&cpu_buffer->record_disabled);
 }
@@ -2808,10 +2808,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 		cpu_buffer = buffer->buffers[cpu];
 		local_irq_save(flags);
 		if (dolock)
-			spin_lock(&cpu_buffer->reader_lock);
+			atomic_spin_lock(&cpu_buffer->reader_lock);
 		ret = rb_per_cpu_empty(cpu_buffer);
 		if (dolock)
-			spin_unlock(&cpu_buffer->reader_lock);
+			atomic_spin_unlock(&cpu_buffer->reader_lock);
 		local_irq_restore(flags);

 		if (!ret)
@@ -2842,10 +2842,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	if (dolock)
-		spin_lock(&cpu_buffer->reader_lock);
+		atomic_spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
 	if (dolock)
-		spin_unlock(&cpu_buffer->reader_lock);
+		atomic_spin_unlock(&cpu_buffer->reader_lock);
 	local_irq_restore(flags);

 	return ret;
@@ -3031,7 +3031,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	if (!bpage)
 		goto out;

-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	atomic_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
@@ -3106,7 +3106,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		ret = read;

  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	atomic_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

  out:
 	return ret;
kernel/trace/trace.c
@@ -668,7 +668,7 @@ static void trace_init_cmdlines(void)
 }

 static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
+static DEFINE_ATOMIC_SPINLOCK(tracing_start_lock);

 /**
  * ftrace_off_permanent - disable all ftrace code permanently
@@ -699,7 +699,7 @@ void tracing_start(void)
 	if (tracing_disabled)
 		return;

-	spin_lock_irqsave(&tracing_start_lock, flags);
+	atomic_spin_lock_irqsave(&tracing_start_lock, flags);
 	if (--trace_stop_count) {
 		if (trace_stop_count < 0) {
 			/* Someone screwed up their debugging */
@@ -720,7 +720,7 @@ void tracing_start(void)
 	ftrace_start();
  out:
-	spin_unlock_irqrestore(&tracing_start_lock, flags);
+	atomic_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }

 /**
@@ -735,7 +735,7 @@ void tracing_stop(void)
 	unsigned long flags;

 	ftrace_stop();
-	spin_lock_irqsave(&tracing_start_lock, flags);
+	atomic_spin_lock_irqsave(&tracing_start_lock, flags);
 	if (trace_stop_count++)
 		goto out;
@@ -748,7 +748,7 @@ void tracing_stop(void)
 		ring_buffer_record_disable(buffer);
  out:
-	spin_unlock_irqrestore(&tracing_start_lock, flags);
+	atomic_spin_unlock_irqrestore(&tracing_start_lock, flags);
 }

 void trace_stop_cmdline_recording(void);
kernel/trace/trace_irqsoff.c
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
 static DEFINE_PER_CPU(int, tracing_cpu);

-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_ATOMIC_SPINLOCK(max_trace_lock);

 enum {
 	TRACER_IRQS_OFF		= (1 << 1),
@@ -149,7 +149,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;

-	spin_lock_irqsave(&max_trace_lock, flags);
+	atomic_spin_lock_irqsave(&max_trace_lock, flags);

 	/* check if we are still the max latency */
 	if (!report_latency(delta))
@@ -173,7 +173,7 @@ check_critical_timing(struct trace_array *tr,
 	max_sequence++;

  out_unlock:
-	spin_unlock_irqrestore(&max_trace_lock, flags);
+	atomic_spin_unlock_irqrestore(&max_trace_lock, flags);

  out:
 	data->critical_sequence = max_sequence;