Merge commit 'fixes.2015.02.23a' into core/rcu
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 83360b4..df668c0 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -27,6 +27,7 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
+#include <linux/swait.h>
 #include <linux/stop_machine.h>
 
 /*
@@ -149,8 +150,9 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-       raw_spinlock_t lock;    /* Root rcu_node's lock protects some */
-                               /*  rcu_state fields as well as following. */
+       raw_spinlock_t __private lock;  /* Root rcu_node's lock protects */
+                                       /*  some rcu_state fields as well as */
+                                       /*  following. */
        unsigned long gpnum;    /* Current grace period for this node. */
                                /*  This will either be equal to or one */
                                /*  behind the root rcu_node's gpnum. */
@@ -243,7 +245,7 @@ struct rcu_node {
                                /* Refused to boost: not sure why, though. */
                                /*  This can happen due to race conditions. */
 #ifdef CONFIG_RCU_NOCB_CPU
-       wait_queue_head_t nocb_gp_wq[2];
+       struct swait_queue_head nocb_gp_wq[2];
                                /* Place for rcu_nocb_kthread() to wait for GP. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
        int need_future_gp[2];
@@ -399,7 +401,7 @@ struct rcu_data {
        atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
        struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
        struct rcu_head **nocb_follower_tail;
-       wait_queue_head_t nocb_wq;      /* For nocb kthreads to sleep on. */
+       struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
        struct task_struct *nocb_kthread;
        int nocb_defer_wakeup;          /* Defer wakeup of nocb_kthread. */
 
@@ -478,7 +480,7 @@ struct rcu_state {
        unsigned long gpnum;                    /* Current gp number. */
        unsigned long completed;                /* # of last completed gp. */
        struct task_struct *gp_kthread;         /* Task for grace periods. */
-       wait_queue_head_t gp_wq;                /* Where GP task waits. */
+       struct swait_queue_head gp_wq;          /* Where GP task waits. */
        short gp_flags;                         /* Commands for GP task. */
        short gp_state;                         /* GP kthread sleep state. */
 
@@ -506,7 +508,7 @@ struct rcu_state {
        unsigned long expedited_sequence;       /* Take a ticket. */
        atomic_long_t expedited_normal;         /* # fallbacks to normal. */
        atomic_t expedited_need_qs;             /* # CPUs left to check in. */
-       wait_queue_head_t expedited_wq;         /* Wait for check-ins. */
+       struct swait_queue_head expedited_wq;   /* Wait for check-ins. */
        int ncpus_snap;                         /* # CPUs seen last time. */
 
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
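
The wait queues above move from wait_queue_head_t to struct swait_queue_head, the simple wait-queue type from the newly included <linux/swait.h>. Below is a minimal sketch of the wait/wake pattern such a field follows, assuming the swait API of this kernel generation (init_swait_queue_head(), swait_event_interruptible(), swake_up()); the gp_demo structure and its flag are hypothetical, not fields of tree.h:

    #include <linux/swait.h>

    /* Hypothetical miniature of the gp_wq pattern; illustration only. */
    struct gp_demo {
            struct swait_queue_head gp_wq;  /* where the kthread sleeps */
            int gp_flags;                   /* condition the kthread waits on */
    };

    static void gp_demo_init(struct gp_demo *d)
    {
            init_swait_queue_head(&d->gp_wq);
            d->gp_flags = 0;
    }

    /* Waiter side: sleep until a flag has been posted. */
    static void gp_demo_wait(struct gp_demo *d)
    {
            swait_event_interruptible(d->gp_wq, READ_ONCE(d->gp_flags) != 0);
    }

    /* Waker side: post the flag, then wake the (single) waiter. */
    static void gp_demo_kick(struct gp_demo *d)
    {
            WRITE_ONCE(d->gp_flags, 1);
            swake_up(&d->gp_wq);
    }
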
@@ -621,7 +623,8 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
-static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy, unsigned long flags);
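
The single rcu_nocb_gp_cleanup(rsp, rnp) hook is split into rcu_nocb_gp_get(), which only looks up the swait queue and is therefore safe to call while holding the rcu_node lock, and rcu_nocb_gp_cleanup(), which takes that queue and can run after the lock has been dropped. A hedged sketch of the intended calling pattern (the caller below is illustrative; the real call site lives in tree.c):

    /* Illustrative caller, not taken verbatim from tree.c. */
    static void demo_end_of_gp(struct rcu_node *rnp)
    {
            struct swait_queue_head *sq;

            raw_spin_lock_irq_rcu_node(rnp);
            sq = rcu_nocb_gp_get(rnp);      /* lock held: just a lookup */
            raw_spin_unlock_irq_rcu_node(rnp);
            rcu_nocb_gp_cleanup(sq);        /* wakeup with the lock dropped */
    }
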
@@ -680,7 +683,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #else #ifdef CONFIG_PPC */
 
 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values; this in turn means that an UNLOCK of one level
@@ -689,29 +692,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * Because ->lock of struct rcu_node is a __private field, one should use these
+ * wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-       raw_spin_lock(&rnp->lock);
+       raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
        smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+       raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-       raw_spin_lock_irq(&rnp->lock);
+       raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
        smp_mb__after_unlock_lock();
 }
 
-#define raw_spin_lock_irqsave_rcu_node(rnp, flags)     \
-do {                                                   \
-       typecheck(unsigned long, flags);                \
-       raw_spin_lock_irqsave(&(rnp)->lock, flags);     \
-       smp_mb__after_unlock_lock();                    \
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+       raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags)                     \
+do {                                                                   \
+       typecheck(unsigned long, flags);                                \
+       raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);       \
+       smp_mb__after_unlock_lock();                                    \
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)                        \
+do {                                                                   \
+       typecheck(unsigned long, flags);                                \
+       raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);  \
 } while (0)
 
 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-       bool locked = raw_spin_trylock(&rnp->lock);
+       bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
 
        if (locked)
                smp_mb__after_unlock_lock();
        return locked;
 }
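
With ->lock now annotated __private, outside code is expected to go through the wrappers above rather than touching the field directly. A short sketch of a hypothetical caller (not taken from tree.c) that uses the irqsave/irqrestore pair and so gets the smp_mb__after_unlock_lock() ordering for free:

    /* Hypothetical caller; demonstrates the wrapper pair only. */
    static void demo_note_gpnum(struct rcu_node *rnp, unsigned long gpnum)
    {
            unsigned long flags;

            /* Acquire with full ordering against any prior rcu_node unlock. */
            raw_spin_lock_irqsave_rcu_node(rnp, flags);
            rnp->gpnum = gpnum;
            raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    }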