Commit fc2c504b authored by Thomas Gleixner

Merge branch 'rt/rcu' into rt/base

parents aed9cd6a b526aded
@@ -27,6 +27,8 @@
#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H

#include <linux/wait.h>

struct srcu_struct_array {
        int c[2];
};
@@ -50,4 +52,24 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
long srcu_batches_completed(struct srcu_struct *sp);
/*
 * fully compatible with srcu, but optimized for writers.
 */
struct qrcu_struct {
        int completed;
        atomic_t ctr[2];
        wait_queue_head_t wq;
        struct mutex mutex;
};
int init_qrcu_struct(struct qrcu_struct *qp);
int qrcu_read_lock(struct qrcu_struct *qp);
void qrcu_read_unlock(struct qrcu_struct *qp, int idx);
void synchronize_qrcu(struct qrcu_struct *qp);
static inline void cleanup_qrcu_struct(struct qrcu_struct *qp)
{
}
#endif
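The header hunk above only declares the QRCU interface. As a rough usage sketch (not part of the commit: struct foo, my_qrcu, global_ptr and do_something_with() are invented names for illustration), a reader/updater pair protecting a shared pointer might look as follows, assuming init_qrcu_struct(&my_qrcu) has already been called once:

/* Illustrative sketch only; not part of the patch. */
static struct qrcu_struct my_qrcu;
static struct foo *global_ptr;

void reader(void)
{
        struct foo *p;
        int idx;

        idx = qrcu_read_lock(&my_qrcu);         /* returns which counter we hold */
        p = rcu_dereference(global_ptr);
        if (p)
                do_something_with(p);           /* hypothetical helper */
        qrcu_read_unlock(&my_qrcu, idx);        /* must pass the same index back */
}

void update(struct foo *new_val)
{
        struct foo *old = global_ptr;

        rcu_assign_pointer(global_ptr, new_val);
        synchronize_qrcu(&my_qrcu);     /* wait for readers of the old pointer */
        kfree(old);                     /* safe: no reader can still see it */
}

Unlike synchronize_srcu(), synchronize_qrcu() has a lockless fastpath when no readers are present (see the implementation below), which is what "optimized for writers" in the header comment refers to.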
@@ -318,6 +318,7 @@ choice
config CLASSIC_RCU
        bool "Classic RCU"
        depends on !PREEMPT_RT
        help
          This option selects the classic RCU implementation that is
          designed for best read-side performance on non-realtime
@@ -327,6 +328,7 @@ config CLASSIC_RCU
config TREE_RCU
        bool "Tree-based hierarchical RCU"
        depends on !PREEMPT_RT
        help
          This option selects the RCU implementation that is
          designed for very large SMP system with hundreds or
...
@@ -255,3 +255,89 @@ EXPORT_SYMBOL_GPL(srcu_read_lock);
EXPORT_SYMBOL_GPL(srcu_read_unlock);
EXPORT_SYMBOL_GPL(synchronize_srcu);
EXPORT_SYMBOL_GPL(srcu_batches_completed);
int init_qrcu_struct(struct qrcu_struct *qp)
{
        qp->completed = 0;
        atomic_set(qp->ctr + 0, 1);
        atomic_set(qp->ctr + 1, 0);
        init_waitqueue_head(&qp->wq);
        mutex_init(&qp->mutex);
        return 0;
}
int qrcu_read_lock(struct qrcu_struct *qp)
{
        for (;;) {
                int idx = qp->completed & 0x1;
                if (likely(atomic_inc_not_zero(qp->ctr + idx)))
                        return idx;
        }
}
void qrcu_read_unlock(struct qrcu_struct *qp, int idx)
{
        if (atomic_dec_and_test(qp->ctr + idx))
                wake_up(&qp->wq);
}
void synchronize_qrcu(struct qrcu_struct *qp)
{
        int idx;

        smp_mb(); /* Force preceding change to happen before fastpath check. */

        /*
         * Fastpath: If the two counters sum to "1" at a given point in
         * time, there are no readers. However, it takes two separate
         * loads to sample both counters, which won't occur simultaneously.
         * So we might race with a counter switch, so that we might see
         * ctr[0]==0, then the counter might switch, then we might see
         * ctr[1]==1 (unbeknownst to us because there is a reader still
         * there). So we do a read memory barrier and recheck. If the
         * same race happens again, there must have been a second counter
         * switch. This second counter switch could not have happened
         * until all preceding readers finished, so if the condition
         * is true both times, we may safely proceed.
         *
         * This relies critically on the atomic increment and atomic
         * decrement being seen as executing in order.
         */
        if (atomic_read(&qp->ctr[0]) + atomic_read(&qp->ctr[1]) <= 1) {
                smp_rmb(); /* Keep two checks independent. */
                if (atomic_read(&qp->ctr[0]) + atomic_read(&qp->ctr[1]) <= 1)
                        goto out;
        }

        mutex_lock(&qp->mutex);

        idx = qp->completed & 0x1;
        if (atomic_read(qp->ctr + idx) == 1)
                goto out_unlock;

        atomic_inc(qp->ctr + (idx ^ 0x1));

        /*
         * Prevent subsequent decrement from being seen before previous
         * increment -- such an inversion could cause the fastpath
         * above to falsely conclude that there were no readers. Also,
         * reduce the likelihood that qrcu_read_lock() will loop.
         */
        smp_mb__after_atomic_inc();
        qp->completed++;

        atomic_dec(qp->ctr + idx);
        __wait_event(qp->wq, !atomic_read(qp->ctr + idx));
out_unlock:
        mutex_unlock(&qp->mutex);
out:
        smp_mb(); /* force subsequent free after qrcu_read_unlock(). */
}
EXPORT_SYMBOL_GPL(init_qrcu_struct);
EXPORT_SYMBOL_GPL(qrcu_read_lock);
EXPORT_SYMBOL_GPL(qrcu_read_unlock);
EXPORT_SYMBOL_GPL(synchronize_qrcu);
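To make the counter invariant behind the fastpath check easier to see, here is a stand-alone, single-threaded model (ordinary ints, no atomics or memory barriers, so it deliberately ignores the ordering race discussed in the big comment above); it only illustrates that ctr[0] + ctr[1] equals 1 plus the number of readers currently inside a critical section, so a sum of at most 1 means "no readers":

/*
 * Single-threaded model of the QRCU counter scheme. Illustrative only;
 * not kernel code and not part of the commit.
 */
#include <stdio.h>

static int completed;
static int ctr[2] = { 1, 0 };   /* the active counter carries a bias of 1 */

static int model_read_lock(void)                /* models qrcu_read_lock() */
{
        int idx = completed & 0x1;

        ctr[idx]++;
        return idx;
}

static void model_read_unlock(int idx)          /* models qrcu_read_unlock() */
{
        ctr[idx]--;
}

static void model_writer_flip(void)     /* models the synchronize_qrcu() slowpath */
{
        int idx = completed & 0x1;

        ctr[idx ^ 0x1]++;       /* move the bias to the other counter...      */
        completed++;            /* ...steer new readers to that counter...    */
        ctr[idx]--;             /* ...and drop the bias from the old counter. */
        /* The real code now waits for ctr[idx] to drain to zero. */
}

int main(void)
{
        int r1, r2;

        printf("sum=%d (no readers)\n", ctr[0] + ctr[1]);              /* 1 */
        r1 = model_read_lock();
        r2 = model_read_lock();
        printf("sum=%d (two readers)\n", ctr[0] + ctr[1]);             /* 3 */
        model_writer_flip();
        printf("sum=%d (flipped, readers still present)\n",
               ctr[0] + ctr[1]);                                       /* 3 */
        model_read_unlock(r1);
        model_read_unlock(r2);
        printf("sum=%d (grace period may end)\n", ctr[0] + ctr[1]);    /* 1 */
        return 0;
}

Compiled with any C compiler, the model prints the sums 1, 3, 3, 1, matching the commented trace: the sum only drops back to 1 once every reader that started before the flip has unlocked, which is exactly the condition the kernel fastpath tests (twice, with an smp_rmb() in between) before skipping the mutex.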