MIPS: Fix octeon FP context switch handling
[cascardo/linux.git] kernel/rcu/tree.c
index b280373..9f75f25 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -71,6 +71,7 @@ MODULE_ALIAS("rcutree");
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
+static struct lock_class_key rcu_exp_sched_class[RCU_NUM_LVLS];
 
 /*
  * In order to export the rcu_state name to the tracing tools, it
@@ -645,12 +646,12 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
         * It is illegal to enter an extended quiescent state while
         * in an RCU read-side critical section.
         */
-       rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
-                          "Illegal idle entry in RCU read-side critical section.");
-       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
-                          "Illegal idle entry in RCU-bh read-side critical section.");
-       rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
-                          "Illegal idle entry in RCU-sched read-side critical section.");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+                        "Illegal idle entry in RCU read-side critical section.");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
+                        "Illegal idle entry in RCU-bh read-side critical section.");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
+                        "Illegal idle entry in RCU-sched read-side critical section.");
 }
 
 /*
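
Note that the conversion from rcu_lockdep_assert() to RCU_LOCKDEP_WARN() inverts the sense of the condition: the old macro complained when its argument was false, the new one complains when it is true, which is why the negations above disappear. A simplified sketch of the new macro's shape, as an assumption for illustration (the in-tree definition in include/linux/rcupdate.h also latches a per-site flag so each call site splats at most once):

    /* Simplified sketch of RCU_LOCKDEP_WARN(); the real macro also
     * suppresses repeated splats from the same call site. */
    #define RCU_LOCKDEP_WARN(c, s)                                         \
            do {                                                           \
                    if (debug_lockdep_rcu_enabled() && (c))                \
                            lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
            } while (0)
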
@@ -697,7 +698,7 @@ void rcu_idle_enter(void)
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
 /**
  * rcu_user_enter - inform RCU that we are resuming userspace.
  *
@@ -710,7 +711,7 @@ void rcu_user_enter(void)
 {
        rcu_eqs_enter(1);
 }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
 
 /**
  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
@@ -824,7 +825,7 @@ void rcu_idle_exit(void)
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
 /**
  * rcu_user_exit - inform RCU that we are exiting userspace.
  *
@@ -835,7 +836,7 @@ void rcu_user_exit(void)
 {
        rcu_eqs_exit(1);
 }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
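
The CONFIG_RCU_USER_QS guards become CONFIG_NO_HZ_FULL because the former symbol was folded into the latter; rcu_user_enter()/rcu_user_exit() are only needed when the tick can stop while a CPU runs in userspace. The intended pairing, sketched for illustration only (the real callers live in the context-tracking code):

    /* Illustrative call pairing on kernel/user transitions. */
    rcu_user_enter();   /* userspace counts as an extended quiescent state */
    /* ... CPU runs in userspace with the tick possibly stopped ... */
    rcu_user_exit();    /* back in the kernel; RCU watches this CPU again */
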
@@ -974,9 +975,9 @@ bool notrace rcu_is_watching(void)
 {
        bool ret;
 
-       preempt_disable();
+       preempt_disable_notrace();
        ret = __rcu_is_watching();
-       preempt_enable();
+       preempt_enable_notrace();
        return ret;
 }
 EXPORT_SYMBOL_GPL(rcu_is_watching);
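
rcu_is_watching() can be called from the function tracer's own hooks, so taking the traced preempt_disable()/preempt_enable() paths here could recurse back into the tracer; the _notrace variants disable and enable preemption without emitting trace events. An illustrative caller, with a hypothetical function name:

    /* Hypothetical tracing hook that must first check RCU's state. */
    static void my_trace_hook(void)
    {
            if (!rcu_is_watching())
                    return; /* CPU is idle from RCU's viewpoint; RCU
                             * read-side primitives are unsafe here */
            /* ... rcu_read_lock()/rcu_dereference() are safe ... */
    }
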
@@ -2088,7 +2089,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        rsp->gp_state = RCU_GP_WAIT_FQS;
                        ret = wait_event_interruptible_timeout(rsp->gp_wq,
                                        rcu_gp_fqs_check_wake(rsp, &gf), j);
-                       rsp->gp_state = RCU_GP_DONE_FQS;
+                       rsp->gp_state = RCU_GP_DOING_FQS;
                        /* Locking provides needed memory barriers. */
                        /* If grace period done, leave loop. */
                        if (!READ_ONCE(rnp->qsmask) &&
@@ -3179,10 +3180,10 @@ static inline int rcu_blocking_is_gp(void)
  */
 void synchronize_sched(void)
 {
-       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                          !lock_is_held(&rcu_lock_map) &&
-                          !lock_is_held(&rcu_sched_lock_map),
-                          "Illegal synchronize_sched() in RCU-sched read-side critical section");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_sched() in RCU-sched read-side critical section");
        if (rcu_blocking_is_gp())
                return;
        if (rcu_gp_is_expedited())
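
With the inverted polarity, the warning now fires when any of the three read-side lock maps is held at this point. An illustrative misuse that the check would flag:

    /* Blocking grace-period wait inside a read-side critical section:
     * this can deadlock, and the converted check splats on it. */
    rcu_read_lock();
    synchronize_sched();    /* RCU_LOCKDEP_WARN fires: rcu_lock_map held */
    rcu_read_unlock();
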
@@ -3206,10 +3207,10 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
  */
 void synchronize_rcu_bh(void)
 {
-       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-                          !lock_is_held(&rcu_lock_map) &&
-                          !lock_is_held(&rcu_sched_lock_map),
-                          "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
+       RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+                        lock_is_held(&rcu_lock_map) ||
+                        lock_is_held(&rcu_sched_lock_map),
+                        "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
        if (rcu_blocking_is_gp())
                return;
        if (rcu_gp_is_expedited())
@@ -3271,6 +3272,58 @@ void cond_synchronize_rcu(unsigned long oldstate)
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
+/**
+ * get_state_synchronize_sched - Snapshot current RCU-sched state
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_sched()
+ * to determine whether or not a full grace period has elapsed in the
+ * meantime.
+ */
+unsigned long get_state_synchronize_sched(void)
+{
+       /*
+        * Any prior manipulation of RCU-protected data must happen
+        * before the load from ->gpnum.
+        */
+       smp_mb();  /* ^^^ */
+
+       /*
+        * Make sure this load happens before the purportedly
+        * time-consuming work between get_state_synchronize_sched()
+        * and cond_synchronize_sched().
+        */
+       return smp_load_acquire(&rcu_sched_state.gpnum);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
+
+/**
+ * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
+ *
+ * @oldstate: return value from earlier call to get_state_synchronize_sched()
+ *
+ * If a full RCU-sched grace period has elapsed since the earlier call to
+ * get_state_synchronize_sched(), just return.  Otherwise, invoke
+ * synchronize_sched() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account.  But
+ * counter wrap is harmless.  If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for one additional grace period should be just fine.
+ */
+void cond_synchronize_sched(unsigned long oldstate)
+{
+       unsigned long newstate;
+
+       /*
+        * Ensure that this load happens before any RCU-destructive
+        * actions the caller might carry out after we return.
+        */
+       newstate = smp_load_acquire(&rcu_sched_state.completed);
+       if (ULONG_CMP_GE(oldstate, newstate))
+               synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_sched);
+
 /* Adjust sequence number for start of update-side operation. */
 static void rcu_seq_start(unsigned long *sp)
 {
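
The new pair mirrors the existing get_state_synchronize_rcu()/cond_synchronize_rcu() API for the RCU-sched flavor: snapshot the grace-period counter, do unrelated work, then wait only if no full grace period elapsed in the meantime. A sketch of the intended usage, where do_expensive_setup() is a hypothetical placeholder:

    unsigned long s;

    s = get_state_synchronize_sched();  /* snapshot ->gpnum */
    do_expensive_setup();               /* hypothetical slow work */
    cond_synchronize_sched(s);          /* no-op if a full grace period
                                         * already elapsed; otherwise
                                         * waits via synchronize_sched() */
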
@@ -3355,6 +3408,22 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
        struct rcu_node *rnp0;
        struct rcu_node *rnp1 = NULL;
 
+       /*
+        * First try directly acquiring the root lock in order to reduce
+        * latency in the common case where expedited grace periods are
+        * rare.  We check mutex_is_locked() to avoid pathological levels of
+        * memory contention on ->exp_funnel_mutex in the heavy-load case.
+        */
+       rnp0 = rcu_get_root(rsp);
+       if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
+               if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
+                       if (sync_exp_work_done(rsp, rnp0, NULL,
+                                              &rsp->expedited_workdone0, s))
+                               return NULL;
+                       return rnp0;
+               }
+       }
+
        /*
         * Each pass through the following loop works its way
         * up the rcu_node tree, returning if others have done the
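
The fast path checks mutex_is_locked() before mutex_trylock() so that, under heavy contention, callers mostly perform shared reads of the lock word rather than hammering it with atomic acquire attempts. The same test-then-trylock pattern in isolation, shown with a hypothetical mutex m:

    static DEFINE_MUTEX(m);         /* hypothetical lock */

    if (!mutex_is_locked(&m) &&     /* cheap read, no cache-line write */
        mutex_trylock(&m)) {        /* attempt the atomic acquire only now */
            /* ... uncontended fast path holding m ... */
            mutex_unlock(&m);
    }
    /* otherwise fall back to the slow (funnel) path */
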
@@ -4033,6 +4102,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        static const char * const buf[] = RCU_NODE_NAME_INIT;
        static const char * const fqs[] = RCU_FQS_NAME_INIT;
        static const char * const exp[] = RCU_EXP_NAME_INIT;
+       static const char * const exp_sched[] = RCU_EXP_SCHED_NAME_INIT;
        static u8 fl_mask = 0x1;
 
        int levelcnt[RCU_NUM_LVLS];             /* # nodes in each level. */
@@ -4092,8 +4162,14 @@ static void __init rcu_init_one(struct rcu_state *rsp,
                        INIT_LIST_HEAD(&rnp->blkd_tasks);
                        rcu_init_one_nocb(rnp);
                        mutex_init(&rnp->exp_funnel_mutex);
-                       lockdep_set_class_and_name(&rnp->exp_funnel_mutex,
-                                                  &rcu_exp_class[i], exp[i]);
+                       if (rsp == &rcu_sched_state)
+                               lockdep_set_class_and_name(
+                                       &rnp->exp_funnel_mutex,
+                                       &rcu_exp_sched_class[i], exp_sched[i]);
+                       else
+                               lockdep_set_class_and_name(
+                                       &rnp->exp_funnel_mutex,
+                                       &rcu_exp_class[i], exp[i]);
                }
        }
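
Giving rcu_sched_state's funnel mutexes their own lock_class_key array keeps lockdep from merging them into one class with the other flavors' funnel mutexes, which could otherwise produce false-positive deadlock reports between independent funnel trees. The general shape of the API, sketched with hypothetical names:

    /* Distinct lockdep classes for two independent lock trees. */
    static struct lock_class_key key_a, key_b;  /* hypothetical keys */
    struct mutex m;                             /* hypothetical mutex */

    mutex_init(&m);
    if (tree_is_sched)      /* hypothetical predicate */
            lockdep_set_class_and_name(&m, &key_a, "tree-a-lvl0");
    else
            lockdep_set_class_and_name(&m, &key_b, "tree-b-lvl0");
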