rcu: Move rcu_barrier_completion to rcu_state structure
In order to allow each RCU flavor to concurrently execute its rcu_barrier() function, it is necessary to move the relevant state to the rcu_state structure. This commit therefore moves the rcu_barrier_completion global variable to a new ->barrier_completion field in the rcu_state structure.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
This commit is contained in:
parent
24ebbca8ec
commit
7db74df88b
2 changed files with 5 additions and 5 deletions
|
@ -158,7 +158,6 @@ unsigned long rcutorture_vernum;
|
||||||
/* State information for rcu_barrier() and friends. */
|
/* State information for rcu_barrier() and friends. */
|
||||||
|
|
||||||
static DEFINE_MUTEX(rcu_barrier_mutex);
|
static DEFINE_MUTEX(rcu_barrier_mutex);
|
||||||
static struct completion rcu_barrier_completion;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
|
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
|
||||||
|
@ -2275,7 +2274,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
|
||||||
struct rcu_state *rsp = rdp->rsp;
|
struct rcu_state *rsp = rdp->rsp;
|
||||||
|
|
||||||
if (atomic_dec_and_test(&rsp->barrier_cpu_count))
|
if (atomic_dec_and_test(&rsp->barrier_cpu_count))
|
||||||
complete(&rcu_barrier_completion);
|
complete(&rsp->barrier_completion);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -2325,7 +2324,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
|
||||||
* 6. Both rcu_barrier_callback() callbacks are invoked, awakening
|
* 6. Both rcu_barrier_callback() callbacks are invoked, awakening
|
||||||
* us -- but before CPU 1's orphaned callbacks are invoked!!!
|
* us -- but before CPU 1's orphaned callbacks are invoked!!!
|
||||||
*/
|
*/
|
||||||
init_completion(&rcu_barrier_completion);
|
init_completion(&rsp->barrier_completion);
|
||||||
atomic_set(&rsp->barrier_cpu_count, 1);
|
atomic_set(&rsp->barrier_cpu_count, 1);
|
||||||
raw_spin_lock_irqsave(&rsp->onofflock, flags);
|
raw_spin_lock_irqsave(&rsp->onofflock, flags);
|
||||||
rsp->rcu_barrier_in_progress = current;
|
rsp->rcu_barrier_in_progress = current;
|
||||||
|
@ -2375,10 +2374,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
|
||||||
* CPU, and thus each counted, remove the initial count.
|
* CPU, and thus each counted, remove the initial count.
|
||||||
*/
|
*/
|
||||||
if (atomic_dec_and_test(&rsp->barrier_cpu_count))
|
if (atomic_dec_and_test(&rsp->barrier_cpu_count))
|
||||||
complete(&rcu_barrier_completion);
|
complete(&rsp->barrier_completion);
|
||||||
|
|
||||||
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
|
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
|
||||||
wait_for_completion(&rcu_barrier_completion);
|
wait_for_completion(&rsp->barrier_completion);
|
||||||
|
|
||||||
/* Other rcu_barrier() invocations can now safely proceed. */
|
/* Other rcu_barrier() invocations can now safely proceed. */
|
||||||
mutex_unlock(&rcu_barrier_mutex);
|
mutex_unlock(&rcu_barrier_mutex);
|
||||||
|
|
|
@ -401,6 +401,7 @@ struct rcu_state {
|
||||||
/* Task doing rcu_barrier(), */
|
/* Task doing rcu_barrier(), */
|
||||||
/* or NULL if no barrier. */
|
/* or NULL if no barrier. */
|
||||||
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
|
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
|
||||||
|
struct completion barrier_completion; /* Wake at barrier end. */
|
||||||
raw_spinlock_t fqslock; /* Only one task forcing */
|
raw_spinlock_t fqslock; /* Only one task forcing */
|
||||||
/* quiescent states. */
|
/* quiescent states. */
|
||||||
unsigned long jiffies_force_qs; /* Time at which to invoke */
|
unsigned long jiffies_force_qs; /* Time at which to invoke */
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue