x86: tsc prevent time going backwards
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index cb19df7aee0fc7e65007ccc0c7fd7508630a9712..d7498b34c8e9aa8069bbbfd95cdb65a7965fe882 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -5,6 +5,7 @@
 #include <linux/jiffies.h>
 #include <linux/init.h>
 #include <linux/dmi.h>
+#include <linux/percpu.h>
 
 #include <asm/delay.h>
 #include <asm/tsc.h>
@@ -23,13 +24,12 @@ static int tsc_enabled;
 unsigned int tsc_khz;
 EXPORT_SYMBOL_GPL(tsc_khz);
 
-int tsc_disable;
-
 #ifdef CONFIG_X86_TSC
 static int __init tsc_setup(char *str)
 {
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-                               "cannot disable TSC.\n");
+                               "cannot disable TSC completely.\n");
+       mark_tsc_unstable("user disabled TSC");
        return 1;
 }
 #else
@@ -39,8 +39,7 @@ static int __init tsc_setup(char *str)
  */
 static int __init tsc_setup(char *str)
 {
-       tsc_disable = 1;
-
+       setup_clear_cpu_cap(X86_FEATURE_TSC);
        return 1;
 }
 #endif
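
For context, the handler above is bound to the "notsc" boot parameter through
the usual __setup() registration, which lives elsewhere in tsc_32.c and is
untouched by this patch; a minimal sketch of the wiring:

    /* registers tsc_setup() to run when "notsc" is on the command line */
    __setup("notsc", tsc_setup);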
@@ -74,19 +73,37 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
  *     And since SC is a constant power of two, we can convert the div
  *  into a shift.
  *
- *  We can use khz divisor instead of mhz to keep a better percision, since
+ *  We can use khz divisor instead of mhz to keep a better precision, since
  *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
  *  (mathieu.desnoyers@polymtl.ca)
  *
  *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
-unsigned long cyc2ns_scale __read_mostly;
 
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+DEFINE_PER_CPU(unsigned long, cyc2ns);
 
-static inline void set_cyc2ns_scale(unsigned long cpu_khz)
+static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
-       cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+       unsigned long flags, prev_scale, *scale;
+       unsigned long long tsc_now, ns_now;
+
+       local_irq_save(flags);
+       sched_clock_idle_sleep_event();
+
+       scale = &per_cpu(cyc2ns, cpu);
+
+       rdtscll(tsc_now);
+       ns_now = __cycles_2_ns(tsc_now);
+
+       prev_scale = *scale;
+       if (cpu_khz)
+               *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+
+       /*
+        * Start smoothly with the new frequency:
+        */
+       sched_clock_idle_wakeup_event(0);
+       local_irq_restore(flags);
 }
 
 /*
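
To make the scale/shift trick described above concrete, here is a small
standalone userspace sketch of the same fixed-point math, assuming a
hypothetical 2 GHz CPU (cpu_khz = 2000000); it illustrates the formula and is
not kernel code:

    #include <stdio.h>
    #include <stdint.h>

    #define CYC2NS_SCALE_FACTOR 10          /* 2^10, as in the kernel */

    int main(void)
    {
            uint64_t cpu_khz = 2000000;     /* hypothetical 2 GHz CPU */

            /* scale = 10^6 * 2^10 / cpu_khz -- fits in 32 bits */
            uint64_t scale = (1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz;

            /* one second worth of cycles should convert to ~10^9 ns */
            uint64_t cycles = 2000000000ULL;
            printf("%llu ns\n", (unsigned long long)
                   ((cycles * scale) >> CYC2NS_SCALE_FACTOR));
            return 0;
    }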
@@ -131,38 +148,43 @@ unsigned long native_calculate_cpu_khz(void)
 {
        unsigned long long start, end;
        unsigned long count;
-       u64 delta64;
+       u64 delta64 = (u64)ULLONG_MAX;
        int i;
        unsigned long flags;
 
        local_irq_save(flags);
 
-       /* run 3 times to ensure the cache is warm */
+       /* run 3 times to ensure the cache is warm and to get an accurate reading */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);
-       }
-       /*
-        * Error: ECTCNEVERSET
-        * The CTC wasn't reliable: we got a hit on the very first read,
-        * or the CPU was so fast/slow that the quotient wouldn't fit in
-        * 32 bits..
-        */
-       if (count <= 1)
-               goto err;
 
-       delta64 = end - start;
+               /*
+                * Error: ECTCNEVERSET
+                * The CTC wasn't reliable: we got a hit on the very first read,
+                * or the CPU was so fast/slow that the quotient wouldn't fit in
+                * 32 bits..
+                */
+               if (count <= 1)
+                       continue;
+
+               /* cpu freq too slow: */
+               if ((end - start) <= CALIBRATE_TIME_MSEC)
+                       continue;
+
+               /*
+                * We want the minimum time of all runs in case one of them
+                * is inaccurate due to SMI or other delay
+                */
+               delta64 = min(delta64, (end - start));
+       }
 
-       /* cpu freq too fast: */
+       /* cpu freq too fast (or every run was bad): */
        if (delta64 > (1ULL<<32))
                goto err;
 
-       /* cpu freq too slow: */
-       if (delta64 <= CALIBRATE_TIME_MSEC)
-               goto err;
-
        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
        do_div(delta64,CALIBRATE_TIME_MSEC);
 
@@ -181,8 +203,8 @@ int recalibrate_cpu_khz(void)
        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
-               cpu_data[0].loops_per_jiffy =
-                       cpufreq_scale(cpu_data[0].loops_per_jiffy,
+               cpu_data(0).loops_per_jiffy =
+                       cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                        cpu_khz_old, cpu_khz);
                return 0;
        } else
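
cpufreq_scale() performs plain proportional rescaling, old * new_freq /
old_freq in 64-bit arithmetic, so halving the clock halves loops_per_jiffy; a
simplified userspace sketch with hypothetical numbers:

    #include <stdio.h>
    #include <stdint.h>

    /* simplified stand-in for the kernel's cpufreq_scale() */
    static unsigned long scale(unsigned long old, unsigned int div,
                               unsigned int mult)
    {
            return (unsigned long)(((uint64_t)old * mult) / div);
    }

    int main(void)
    {
            /* 4,000,000 loops/jiffy at 2,000,000 kHz -> 1,000,000 kHz */
            printf("%lu loops/jiffy\n",
                   scale(4000000UL, 2000000U, 1000000U));
            return 0;
    }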
@@ -215,7 +237,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
                        return 0;
                }
                ref_freq = freq->old;
-               loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+               loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }
 
@@ -223,7 +245,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                       cpu_data[freq->cpu].loops_per_jiffy =
+                       cpu_data(freq->cpu).loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                                ref_freq, freq->new);
 
@@ -234,7 +256,9 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
                                                ref_freq, freq->new);
                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
                                tsc_khz = cpu_khz;
-                               set_cyc2ns_scale(cpu_khz);
+                               preempt_disable();
+                               set_cyc2ns_scale(cpu_khz, smp_processor_id());
+                               preempt_enable();
                                /*
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
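
The preempt_disable()/preempt_enable() pair exists because smp_processor_id()
is only stable while preemption is off; for reference, an equivalent kernel
idiom (a fragment, assuming the get_cpu()/put_cpu() helpers from
<linux/smp.h>) bundles all three calls:

    int cpu = get_cpu();    /* smp_processor_id() with preemption disabled */
    set_cyc2ns_scale(cpu_khz, cpu);
    put_cpu();              /* re-enables preemption */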
@@ -263,14 +287,27 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
+static struct clocksource clocksource_tsc;
 
+/*
+ * We compare the TSC to the cycle_last value in the clocksource
+ * structure to avoid a nasty time-warp issue. This can be observed in
+ * a very small window right after one CPU updated cycle_last under
+ * xtime lock and the other CPU reads a TSC value which is smaller
+ * than the cycle_last reference value due to a TSC which is slightly
+ * behind. This delta is nowhere else observable, but in that case it
+ * results in a forward time jump in the range of hours due to the
+ * unsigned delta calculation of the time keeping core code, which is
+ * necessary to support wrapping clocksources like pm timer.
+ */
 static cycle_t read_tsc(void)
 {
        cycle_t ret;
 
        rdtscll(ret);
 
-       return ret;
+       return ret >= clocksource_tsc.cycle_last ?
+               ret : clocksource_tsc.cycle_last;
 }
 
 static struct clocksource clocksource_tsc = {
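
The clamp in read_tsc() matters because the timekeeping core computes elapsed
time as an unsigned delta against cycle_last; a small userspace sketch with
made-up cycle values shows how a TSC just two cycles behind would otherwise
wrap into an enormous forward jump:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t cycle_last = 1000000;  /* hypothetical last-update stamp */
            uint64_t tsc = cycle_last - 2;  /* this CPU reads 2 cycles behind */

            /* unsigned delta wraps to ~2^64 -- hours of forward time jump */
            printf("raw delta:     %llu\n",
                   (unsigned long long)(tsc - cycle_last));

            /* the clamp used above keeps the delta at zero instead */
            uint64_t ret = tsc >= cycle_last ? tsc : cycle_last;
            printf("clamped delta: %llu\n",
                   (unsigned long long)(ret - cycle_last));
            return 0;
    }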
@@ -328,6 +365,11 @@ __cpuinit int unsynchronized_tsc(void)
 {
        if (!cpu_has_tsc || tsc_unstable)
                return 1;
+
+       /* Anything with constant TSC should be synchronized */
+       if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+               return 0;
+
        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
@@ -362,7 +404,9 @@ static inline void check_geode_tsc_reliable(void) { }
 
 void __init tsc_init(void)
 {
-       if (!cpu_has_tsc || tsc_disable)
+       int cpu;
+
+       if (!cpu_has_tsc)
                goto out_no_tsc;
 
        cpu_khz = calculate_cpu_khz();
@@ -375,7 +419,15 @@ void __init tsc_init(void)
                                (unsigned long)cpu_khz / 1000,
                                (unsigned long)cpu_khz % 1000);
 
-       set_cyc2ns_scale(cpu_khz);
+       /*
+        * Secondary CPUs do not run through tsc_init(), so set up
+        * all the scale factors for all CPUs, assuming the same
+        * speed as the bootup CPU. (cpufreq notifiers will fix this
+        * up if their speed diverges)
+        */
+       for_each_possible_cpu(cpu)
+               set_cyc2ns_scale(cpu_khz, cpu);
+
        use_tsc_delay();
 
        /* Check and install the TSC clocksource */
@@ -398,10 +450,5 @@ void __init tsc_init(void)
        return;
 
 out_no_tsc:
-       /*
-        * Set the tsc_disable flag if there's no TSC support, this
-        * makes it a fast flag for the kernel to see whether it
-        * should be using the TSC.
-        */
-       tsc_disable = 1;
+       setup_clear_cpu_cap(X86_FEATURE_TSC);
 }