diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5d59e850fb71f6f58b854cdbd0a1f1792f59c8ad..ce97a4df64d3539edc785ff9db119c3ba3edbe07 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -147,7 +147,57 @@ struct rcu_ctrlblk {
        wait_queue_head_t sched_wq;     /* Place for rcu_sched to sleep. */
 };
 
+struct rcu_dyntick_sched {
+       int dynticks;            /* Odd while CPU is active, even in dynticks-idle. */
+       int dynticks_snap;       /* Snapshot of dynticks for grace-period checks. */
+       int sched_qs;            /* Scheduler quiescent states, bumped by rcu_qsctr_inc(). */
+       int sched_qs_snap;       /* Snapshot of sched_qs. */
+       int sched_dynticks_snap; /* Snapshot of dynticks for rcu_sched grace periods. */
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
+       .dynticks = 1,
+};
+
+void rcu_qsctr_inc(int cpu)
+{
+       struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+       rdssp->sched_qs++;
+}
+
+#ifdef CONFIG_NO_HZ
+
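+/*
+ * The per-CPU dynticks counter follows an even/odd convention: it is
+ * odd while the CPU is active and even while the CPU is in dynticks-idle
+ * (nohz) mode.  rcu_enter_nohz() and rcu_exit_nohz() each advance the
+ * counter, which is what the parity warnings below check.
+ */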
+void rcu_enter_nohz(void)
+{
+       static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+       smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+       __get_cpu_var(rcu_dyntick_sched).dynticks++;
+       WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
+}
+
+void rcu_exit_nohz(void)
+{
+       static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+       __get_cpu_var(rcu_dyntick_sched).dynticks++;
+       smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+       WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+                               &rs);
+}
+
+#endif /* CONFIG_NO_HZ */
+
+
 static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
 static struct rcu_ctrlblk rcu_ctrlblk = {
        .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
        .completed = 0,
@@ -427,10 +477,6 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
        }
 }
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
-       .dynticks = 1,
-};
-
 #ifdef CONFIG_NO_HZ
 static DEFINE_PER_CPU(int, rcu_update_flag);