struct rcu_node *rnp0;
struct rcu_node *rnp1 = NULL;
- /*
- * First try directly acquiring the root lock in order to reduce
- * latency in the common case where expedited grace periods are
- * rare. We check mutex_is_locked() to avoid pathological levels of
- * memory contention on ->exp_funnel_mutex in the heavy-load case.
- */
- rnp0 = rcu_get_root(rsp);
- if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
- if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
- trace_rcu_exp_funnel_lock(rsp->name, rnp0->level,
- rnp0->grplo, rnp0->grphi,
- TPS("acq"));
- if (sync_exp_work_done(rsp, rnp0, NULL,
- &rdp->expedited_workdone0, s))
- return NULL;
- return rnp0;
- }
- }
-
/*
* Each pass through the following loop works its way
 * up the rcu_node tree, returning if others have done the
 * work or otherwise falls through holding the root rnp's
 * ->exp_funnel_mutex.  The mapping from CPU to rcu_node structure
 * can be inexact, as it is just promoting locality and is not
* strictly needed for correctness.
*/
- if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s))
+ if (sync_exp_work_done(rsp, NULL, NULL, &rdp->exp_workdone1, s))
return NULL;
mutex_lock(&rdp->exp_funnel_mutex);
trace_rcu_exp_funnel_lock(rsp->name, rdp->mynode->level + 1,
rdp->cpu, rdp->cpu, TPS("acq"));
rnp0 = rdp->mynode;
for (; rnp0 != NULL; rnp0 = rnp0->parent) {
- if (sync_exp_work_done(rsp, rnp1, rdp,
- &rdp->expedited_workdone2, s))
+ if (sync_exp_work_done(rsp, rnp1, rdp, &rdp->exp_workdone2, s))
return NULL;
mutex_lock(&rnp0->exp_funnel_mutex);
		trace_rcu_exp_funnel_lock(rsp->name, rnp0->level,
					  rnp0->grplo, rnp0->grphi, TPS("acq"));
		if (rnp1) {
			mutex_unlock(&rnp1->exp_funnel_mutex);
			trace_rcu_exp_funnel_lock(rsp->name, rnp1->level,
						  rnp1->grplo, rnp1->grphi,
						  TPS("rel"));
		}
rnp1 = rnp0;
}
- if (sync_exp_work_done(rsp, rnp1, rdp,
- &rdp->expedited_workdone3, s))
+ if (sync_exp_work_done(rsp, rnp1, rdp, &rdp->exp_workdone3, s))
return NULL;
return rnp1;
}
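
The hunk above removes the mutex_trylock() fastpath on the root rcu_node's
->exp_funnel_mutex and shortens the ->expedited_workdone* counters to
->exp_workdone*, leaving the funnel loop as the only path to the root.  For
readers new to the pattern, here is a minimal user-space sketch of funnel
locking.  The names struct funnel_node, work_done(), and funnel_lock() are
hypothetical stand-ins for rcu_node, sync_exp_work_done(), and the kernel
function above; this illustrates the idea, not the kernel's implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct funnel_node {
	pthread_mutex_t lock;		/* stand-in for ->exp_funnel_mutex */
	struct funnel_node *parent;	/* NULL at the root */
};

/* Stand-in for sync_exp_work_done(): did someone finish our work? */
static bool work_done(void)
{
	return false;	/* real code compares grace-period sequence numbers */
}

/*
 * Walk from a leaf toward the root, holding at most one lock while
 * sleeping on the next.  Contending threads queue up on the lower
 * levels (the "funnel") instead of all pounding on the root lock,
 * and any thread that finds the work already done drops out early.
 */
static struct funnel_node *funnel_lock(struct funnel_node *leaf)
{
	struct funnel_node *np;
	struct funnel_node *held = NULL;

	for (np = leaf; np != NULL; np = np->parent) {
		if (work_done()) {
			if (held)
				pthread_mutex_unlock(&held->lock);
			return NULL;	/* someone beat us to it */
		}
		pthread_mutex_lock(&np->lock);
		if (held)
			pthread_mutex_unlock(&held->lock);
		held = np;
	}
	return held;	/* caller now holds the root lock */
}

One difference from the sketch: in the kernel code, sync_exp_work_done()
itself releases the previously held mutex when it sees the work completed,
which is why rnp1 is passed to it on each early-exit check.
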
ndetected++;
rdp = per_cpu_ptr(rsp->rda, cpu);
pr_cont(" %d-%c%c%c", cpu,
- "O."[cpu_online(cpu)],
+ "O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rnp->expmaskinit)],
"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
}
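
This last hunk forces a boolean array subscript.  cpu_online() is only
guaranteed to return zero or nonzero, and on some configurations a "true"
result can be a value other than 1, which would index past the end of the
two-character lookup string "O.".  The !! idiom collapses any nonzero value
to exactly 1, matching the adjacent ->expmaskinit and ->expmaskinitnext
tests.  A self-contained illustration of the idiom:

#include <stdio.h>

int main(void)
{
	int raw = 4;	/* a "true" value that is not 1, e.g. a mask bit */

	/* "O."[raw] would read past the three bytes of the literal "O." */
	printf("%c\n", "O."[!!raw]);	/* !! folds nonzero to 1: prints '.' */
	printf("%c\n", "O."[!!0]);	/* zero stays 0: prints 'O' */
	return 0;
}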