Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index e5a5d0c8d66b1629a69f5e0a5c46f249fd49bf20..b0a7ad566081a2f3d9b51f3a3d52ae9263064b33 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
 #include <linux/sched/stat.h>
 #include <linux/math64.h>
 
-/*
- * Please note when changing the tuning values:
- * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
- * a scaling operation multiplication may overflow on 32 bit platforms.
- * In that case, #define RESOLUTION as ULL to get 64 bit result:
- * #define RESOLUTION 1024ULL
- *
- * The default values do not overflow.
- */
 #define BUCKETS 12
 #define INTERVAL_SHIFT 3
 #define INTERVALS (1UL << INTERVAL_SHIFT)
 #define RESOLUTION 1024
 #define DECAY 8
-#define MAX_INTERESTING 50000
-
+#define MAX_INTERESTING (50000 * NSEC_PER_USEC)
 
 /*
  * Concepts and ideas behind the menu governor
@@ -120,14 +110,14 @@ struct menu_device {
        int             needs_update;
        int             tick_wakeup;
 
-       unsigned int    next_timer_us;
+       u64             next_timer_ns;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
        int             interval_ptr;
 };
 
-static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
+static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
 {
        int bucket = 0;
 
@@ -140,15 +130,15 @@ static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters
        if (nr_iowaiters)
                bucket = BUCKETS/2;
 
-       if (duration < 10)
+       if (duration_ns < 10ULL * NSEC_PER_USEC)
                return bucket;
-       if (duration < 100)
+       if (duration_ns < 100ULL * NSEC_PER_USEC)
                return bucket + 1;
-       if (duration < 1000)
+       if (duration_ns < 1000ULL * NSEC_PER_USEC)
                return bucket + 2;
-       if (duration < 10000)
+       if (duration_ns < 10000ULL * NSEC_PER_USEC)
                return bucket + 3;
-       if (duration < 100000)
+       if (duration_ns < 100000ULL * NSEC_PER_USEC)
                return bucket + 4;
        return bucket + 5;
 }
@@ -276,13 +266,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                       bool *stop_tick)
 {
        struct menu_device *data = this_cpu_ptr(&menu_devices);
-       int latency_req = cpuidle_governor_latency_req(dev->cpu);
-       int i;
-       int idx;
-       unsigned int interactivity_req;
+       s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
        unsigned int predicted_us;
+       u64 predicted_ns;
+       u64 interactivity_req;
        unsigned long nr_iowaiters;
        ktime_t delta_next;
+       int i, idx;
 
        if (data->needs_update) {
                menu_update(drv, dev);
@@ -290,15 +280,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
        }
 
        /* determine the expected residency time, round up */
-       data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
+       data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next);
 
        nr_iowaiters = nr_iowait_cpu(dev->cpu);
-       data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
+       data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
 
        if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
-           ((data->next_timer_us < drv->states[1].target_residency ||
-             latency_req < drv->states[1].exit_latency) &&
-            !drv->states[0].disabled && !dev->states_usage[0].disable)) {
+           ((data->next_timer_ns < drv->states[1].target_residency_ns ||
+             latency_req < drv->states[1].exit_latency_ns) &&
+            !dev->states_usage[0].disable)) {
                /*
                 * In this case state[0] will be used no matter what, so return
                 * it right away and keep the tick running if state[0] is a
@@ -308,18 +298,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                return 0;
        }
 
-       /*
-        * Force the result of multiplication to be 64 bits even if both
-        * operands are 32 bits.
-        * Make sure to round up for half microseconds.
-        */
-       predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
-                                        data->correction_factor[data->bucket],
-                                        RESOLUTION * DECAY);
-       /*
-        * Use the lowest expected idle interval to pick the idle state.
-        */
-       predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
+       /* Round up the result for half microseconds. */
+       predicted_us = div_u64(data->next_timer_ns *
+                              data->correction_factor[data->bucket] +
+                              (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
+                              RESOLUTION * DECAY * NSEC_PER_USEC);
+       /* Use the lowest expected idle interval to pick the idle state. */
+       predicted_ns = (u64)min(predicted_us,
+                               get_typical_interval(data, predicted_us)) *
+                               NSEC_PER_USEC;
 
        if (tick_nohz_tick_stopped()) {
                /*
@@ -330,14 +317,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 * the known time till the closest timer event for the idle
                 * state selection.
                 */
-               if (predicted_us < TICK_USEC)
-                       predicted_us = ktime_to_us(delta_next);
+               if (predicted_ns < TICK_NSEC)
+                       predicted_ns = delta_next;
        } else {
                /*
                 * Use the performance multiplier and the user-configurable
                 * latency_req to determine the maximum exit latency.
                 */
-               interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
+               interactivity_req = div64_u64(predicted_ns,
+                                             performance_multiplier(nr_iowaiters));
                if (latency_req > interactivity_req)
                        latency_req = interactivity_req;
        }
@@ -349,27 +337,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
        idx = -1;
        for (i = 0; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
-               struct cpuidle_state_usage *su = &dev->states_usage[i];
 
-               if (s->disabled || su->disable)
+               if (dev->states_usage[i].disable)
                        continue;
 
                if (idx == -1)
                        idx = i; /* first enabled state */
 
-               if (s->target_residency > predicted_us) {
+               if (s->target_residency_ns > predicted_ns) {
                        /*
                         * Use a physical idle state, not busy polling, unless
                         * a timer is going to trigger soon enough.
                         */
                        if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
-                           s->exit_latency <= latency_req &&
-                           s->target_residency <= data->next_timer_us) {
-                               predicted_us = s->target_residency;
+                           s->exit_latency_ns <= latency_req &&
+                           s->target_residency_ns <= data->next_timer_ns) {
+                               predicted_ns = s->target_residency_ns;
                                idx = i;
                                break;
                        }
-                       if (predicted_us < TICK_USEC)
+                       if (predicted_ns < TICK_NSEC)
                                break;
 
                        if (!tick_nohz_tick_stopped()) {
@@ -379,7 +366,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                                 * tick in that case and let the governor run
                                 * again in the next iteration of the loop.
                                 */
-                               predicted_us = drv->states[idx].target_residency;
+                               predicted_ns = drv->states[idx].target_residency_ns;
                                break;
                        }
 
@@ -389,13 +376,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                         * closest timer event, select this one to avoid getting
                         * stuck in the shallow one for too long.
                         */
-                       if (drv->states[idx].target_residency < TICK_USEC &&
-                           s->target_residency <= ktime_to_us(delta_next))
+                       if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+                           s->target_residency_ns <= delta_next)
                                idx = i;
 
                        return idx;
                }
-               if (s->exit_latency > latency_req)
+               if (s->exit_latency_ns > latency_req)
                        break;
 
                idx = i;
@@ -409,12 +396,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         * expected idle duration is shorter than the tick period length.
         */
        if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-            predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
-               unsigned int delta_next_us = ktime_to_us(delta_next);
-
+            predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
                *stop_tick = false;
 
-               if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
+               if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) {
                        /*
                         * The tick is not going to be stopped and the target
                         * residency of the state to be returned is not within
@@ -422,12 +407,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                         * tick, so try to correct that.
                         */
                        for (i = idx - 1; i >= 0; i--) {
-                               if (drv->states[i].disabled ||
-                                   dev->states_usage[i].disable)
+                               if (dev->states_usage[i].disable)
                                        continue;
 
                                idx = i;
-                               if (drv->states[i].target_residency <= delta_next_us)
+                               if (drv->states[i].target_residency_ns <= delta_next)
                                        break;
                        }
                }
@@ -463,7 +447,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        int last_idx = dev->last_state_idx;
        struct cpuidle_state *target = &drv->states[last_idx];
-       unsigned int measured_us;
+       u64 measured_ns;
        unsigned int new_factor;
 
        /*
@@ -481,7 +465,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * assume the state was never reached and the exit latency is 0.
         */
 
-       if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
+       if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
                /*
                 * The nohz code said that there wouldn't be any events within
                 * the tick boundary (if the tick was stopped), but the idle
@@ -491,7 +475,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                 * have been idle long (but not forever) to help the idle
                 * duration predictor do a better job next time.
                 */
-               measured_us = 9 * MAX_INTERESTING / 10;
+               measured_ns = 9 * MAX_INTERESTING / 10;
        } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
                   dev->poll_time_limit) {
                /*
@@ -501,28 +485,29 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                 * the CPU might have been woken up from idle by the next timer.
                 * Assume that to be the case.
                 */
-               measured_us = data->next_timer_us;
+               measured_ns = data->next_timer_ns;
        } else {
                /* measured value */
-               measured_us = dev->last_residency;
+               measured_ns = dev->last_residency_ns;
 
                /* Deduct exit latency */
-               if (measured_us > 2 * target->exit_latency)
-                       measured_us -= target->exit_latency;
+               if (measured_ns > 2 * target->exit_latency_ns)
+                       measured_ns -= target->exit_latency_ns;
                else
-                       measured_us /= 2;
+                       measured_ns /= 2;
        }
 
        /* Make sure our coefficients do not exceed unity */
-       if (measured_us > data->next_timer_us)
-               measured_us = data->next_timer_us;
+       if (measured_ns > data->next_timer_ns)
+               measured_ns = data->next_timer_ns;
 
        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;
 
-       if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
-               new_factor += RESOLUTION * measured_us / data->next_timer_us;
+       if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
+               new_factor += div64_u64(RESOLUTION * measured_ns,
+                                       data->next_timer_ns);
        else
                /*
                 * we were idle so long that we count it as a perfect
@@ -542,7 +527,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        data->correction_factor[data->bucket] = new_factor;
 
        /* update the repeating-pattern data */
-       data->intervals[data->interval_ptr++] = measured_us;
+       data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
 }
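
The arithmetic change that is easiest to misread in this diff is the predicted_us computation: the old code scaled a microsecond value with DIV_ROUND_CLOSEST_ULL(), while the new code keeps next_timer_ns in nanoseconds and folds both the ns-to-us conversion and the round-to-nearest step into one div_u64() by adding half of the enlarged divisor up front. The sketch below is not part of the patch; it is a small userspace check of that equivalence, with RESOLUTION, DECAY, NSEC_PER_USEC, the div_u64() stand-in and the sample inputs all declared locally as assumptions rather than taken from kernel headers.

/*
 * Standalone userspace sketch (not kernel code): compares the old
 * microsecond-based rounding divide with the new nanosecond-based one
 * for one made-up sample input.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define RESOLUTION	1024
#define DECAY		8
#define NSEC_PER_USEC	1000ULL

/* Local stand-in for the kernel's div_u64() (u64 dividend, u32 divisor). */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* Sample inputs: 1500 us until the next timer, correction factor ~0.9. */
	uint64_t next_timer_ns = 1500 * NSEC_PER_USEC;
	unsigned int correction = 9 * RESOLUTION * DECAY / 10;

	/*
	 * Old formula: microsecond input, with DIV_ROUND_CLOSEST_ULL()
	 * spelled out as (x + d/2) / d.
	 */
	uint64_t next_timer_us = next_timer_ns / NSEC_PER_USEC;
	uint64_t old_us = (next_timer_us * correction + (RESOLUTION * DECAY) / 2) /
			  (RESOLUTION * DECAY);

	/*
	 * New formula: nanosecond input, half of the larger divisor added
	 * up front so a single divide still rounds to the nearest microsecond.
	 */
	uint64_t new_us = div_u64(next_timer_ns * correction +
				  (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
				  RESOLUTION * DECAY * NSEC_PER_USEC);

	printf("old: %" PRIu64 " us, new: %" PRIu64 " us\n", old_us, new_us);
	return 0;
}

For these sample values both paths print 1350 us, i.e. the nanosecond-based divide reproduces the old round-to-nearest behaviour while keeping the governor's internal state in nanoseconds.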