Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author		Linus Torvalds <torvalds@linux-foundation.org>
		Mon, 13 Aug 2018 20:02:31 +0000 (13:02 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Mon, 13 Aug 2018 20:02:31 +0000 (13:02 -0700)
Pull timer updates from Thomas Gleixner:
 "The timers departement more or less proudly presents:

   - More Y2038 timekeeping work, mostly in the core code. The work is
     slowly but steadily targeting the actual syscalls.

   - Enhanced timekeeping suspend/resume support by utilizing
     clocksources which do not stop during suspend, but are otherwise
     not the main timekeeping clocksources (a usage sketch of the new
     hooks precedes the per-file diffs below)

   - Make NTP adjustments more accurate and immediate when the frequency
     is set directly and not incrementally.

   - Sanitize the overrun handling of posix timers (see the sketch below)

   - A new timer driver for Mediatek SoCs

   - The usual pile of fixes and updates all over the place"
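
The "overrun handling" item above widens the posix timer overrun counters to
s64 and clamps only the value reported to userspace (siginfo.si_overrun and
timer_getoverrun(2)) to INT_MAX; see the posix-timers.h and posix-timers.c
hunks below. A minimal stand-alone sketch of that clamping rule, using a
hypothetical helper name rather than the kernel's:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Accumulate overruns in 64 bits; cap only the user-visible value,
     * mirroring the rule this series introduces for posix timers. */
    static int overrun_to_int(int64_t overrun_last, int baseval)
    {
            int64_t sum = overrun_last + (int64_t)baseval;

            return sum > (int64_t)INT_MAX ? INT_MAX : (int)sum;
    }

    int main(void)
    {
            /* An accumulated overrun beyond INT_MAX is reported as INT_MAX. */
            printf("%d\n", overrun_to_int(3000000000LL, 5));
            return 0;
    }

Keeping the internal counter wide avoids the silent overflow the old int
counters allowed; the cap is applied only at the userspace boundary.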

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
  clockevents: Warn if cpu_all_mask is used as cpumask
  tick/broadcast-hrtimer: Use cpu_possible_mask for ce_broadcast_hrtimer
  clocksource/drivers/arm_arch_timer: Fix bogus cpu_all_mask usage
  clocksource: ti-32k: Remove CLOCK_SOURCE_SUSPEND_NONSTOP flag
  timers: Clear timer_base::must_forward_clk with timer_base::lock held
  clocksource/drivers/sprd: Register one always-on timer to compensate suspend time
  clocksource/drivers/timer-mediatek: Add support for system timer
  clocksource/drivers/timer-mediatek: Convert the driver to timer-of
  clocksource/drivers/timer-mediatek: Use specific prefix for GPT
  clocksource/drivers/timer-mediatek: Rename mtk_timer to timer-mediatek
  clocksource/drivers/timer-mediatek: Add system timer bindings
  clocksource/drivers: Set clockevent device cpumask to cpu_possible_mask
  time: Introduce one suspend clocksource to compensate the suspend time
  time: Fix extra sleeptime injection when suspend fails
  timekeeping/ntp: Constify some function arguments
  ntp: Use kstrtos64 for s64 variable
  ntp: Remove redundant arguments
  timer: Fix coding style
  ktime: Provide typesafe ktime_to_ns()
  hrtimer: Improve kernel message printing
  ...

40 files changed:
Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
drivers/clocksource/Makefile
drivers/clocksource/mtk_timer.c [deleted file]
drivers/clocksource/tegra20_timer.c
drivers/clocksource/timer-atcpit100.c
drivers/clocksource/timer-keystone.c
drivers/clocksource/timer-mediatek.c [new file with mode: 0644]
drivers/clocksource/timer-sprd.c
drivers/clocksource/timer-ti-32k.c
drivers/clocksource/zevio-timer.c
fs/timerfd.c
include/linux/clocksource.h
include/linux/compat.h
include/linux/compat_time.h
include/linux/ktime.h
include/linux/posix-timers.h
include/linux/syscalls.h
include/linux/time.h
include/linux/time64.h
include/linux/timekeeping.h
include/uapi/linux/time.h
kernel/compat.c
kernel/sys.c
kernel/time/alarmtimer.c
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/ntp.c
kernel/time/ntp_internal.h
kernel/time/posix-cpu-timers.c
kernel/time/posix-stubs.c
kernel/time/posix-timers.c
kernel/time/posix-timers.h
kernel/time/tick-broadcast-hrtimer.c
kernel/time/time.c
kernel/time/timekeeping.c
kernel/time/timekeeping_debug.c
kernel/time/timekeeping_internal.h
kernel/time/timer.c
tools/testing/selftests/timers/raw_skew.c

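The suspend/resume enhancement is built around two new hooks,
clocksource_start_suspend_timing() and clocksource_stop_suspend_timing(),
declared in the include/linux/clocksource.h hunk and implemented in the
kernel/time/clocksource.c hunk further down. A hedged usage sketch follows;
the wrapper function here is illustrative only (the real call sites are
timekeeping_suspend()/timekeeping_resume(), which are not shown in this diff):

    #include <linux/clocksource.h>

    /* Illustrative kernel-context sketch: measure how long the system was
     * suspended using a registered CLOCK_SOURCE_SUSPEND_NONSTOP clocksource. */
    static u64 example_suspend_interval_ns(struct clocksource *curr_cs,
                                           u64 cycles_at_suspend,
                                           u64 cycles_at_resume)
    {
            /* Late in suspend: latch the always-on suspend timer. */
            clocksource_start_suspend_timing(curr_cs, cycles_at_suspend);

            /* ... system sleeps ... */

            /* Early in resume: nanoseconds spent suspended, or 0 if no
             * usable suspend clocksource was registered. */
            return clocksource_stop_suspend_timing(curr_cs, cycles_at_resume);
    }

The returned nanoseconds are what timekeeping injects as sleep time; the sprd
driver below registers a separate always-on timer for exactly this purpose.
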
index b1fe7e9de1b47b056778d1bafe69e06bac1534c4..18d4d0166c76bff5dc35a3eccdd94781a68ed494 100644 (file)
@@ -1,19 +1,25 @@
-Mediatek MT6577, MT6572 and MT6589 Timers
----------------------------------------
+Mediatek Timers
+---------------
+
+Mediatek SoCs have two different timers on different platforms,
+- GPT (General Purpose Timer)
+- SYST (System Timer)
+
+The proper timer will be selected automatically by driver.
 
 Required properties:
 - compatible should contain:
-       * "mediatek,mt2701-timer" for MT2701 compatible timers
-       * "mediatek,mt6580-timer" for MT6580 compatible timers
-       * "mediatek,mt6589-timer" for MT6589 compatible timers
-       * "mediatek,mt7623-timer" for MT7623 compatible timers
-       * "mediatek,mt8127-timer" for MT8127 compatible timers
-       * "mediatek,mt8135-timer" for MT8135 compatible timers
-       * "mediatek,mt8173-timer" for MT8173 compatible timers
-       * "mediatek,mt6577-timer" for MT6577 and all above compatible timers
-- reg: Should contain location and length for timers register.
-- clocks: Clocks driving the timer hardware. This list should include two
-       clocks. The order is system clock and as second clock the RTC clock.
+       * "mediatek,mt2701-timer" for MT2701 compatible timers (GPT)
+       * "mediatek,mt6580-timer" for MT6580 compatible timers (GPT)
+       * "mediatek,mt6589-timer" for MT6589 compatible timers (GPT)
+       * "mediatek,mt7623-timer" for MT7623 compatible timers (GPT)
+       * "mediatek,mt8127-timer" for MT8127 compatible timers (GPT)
+       * "mediatek,mt8135-timer" for MT8135 compatible timers (GPT)
+       * "mediatek,mt8173-timer" for MT8173 compatible timers (GPT)
+       * "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT)
+       * "mediatek,mt6765-timer" for MT6765 compatible timers (SYST)
+- reg: Should contain location and length for timer register.
+- clocks: Should contain system clock.
 
 Examples:
 
@@ -21,5 +27,5 @@ Examples:
                compatible = "mediatek,mt6577-timer";
                reg = <0x10008000 0x80>;
                interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>;
-               clocks = <&system_clk>, <&rtc_clk>;
+               clocks = <&system_clk>;
        };
index 00caf37e52f9c6cad3913c2c0b18bd32aa20756a..c070cc7992e9c84d45216565e7f399d54f12cfcd 100644 (file)
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)      += samsung_pwm_timer.o
 obj-$(CONFIG_FSL_FTM_TIMER)    += fsl_ftm_timer.o
 obj-$(CONFIG_VF_PIT_TIMER)     += vf_pit_timer.o
 obj-$(CONFIG_CLKSRC_QCOM)      += qcom-timer.o
-obj-$(CONFIG_MTK_TIMER)                += mtk_timer.o
+obj-$(CONFIG_MTK_TIMER)                += timer-mediatek.o
 obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
 obj-$(CONFIG_CLKSRC_TI_32K)    += timer-ti-32k.o
 obj-$(CONFIG_CLKSRC_NPS)       += timer-nps.o
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
deleted file mode 100644 (file)
index f9b724f..0000000
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Mediatek SoCs General-Purpose Timer handling.
- *
- * Copyright (C) 2014 Matthias Brugger
- *
- * Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqreturn.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-#include <linux/slab.h>
-
-#define GPT_IRQ_EN_REG         0x00
-#define GPT_IRQ_ENABLE(val)    BIT((val) - 1)
-#define GPT_IRQ_ACK_REG                0x08
-#define GPT_IRQ_ACK(val)       BIT((val) - 1)
-
-#define TIMER_CTRL_REG(val)    (0x10 * (val))
-#define TIMER_CTRL_OP(val)     (((val) & 0x3) << 4)
-#define TIMER_CTRL_OP_ONESHOT  (0)
-#define TIMER_CTRL_OP_REPEAT   (1)
-#define TIMER_CTRL_OP_FREERUN  (3)
-#define TIMER_CTRL_CLEAR       (2)
-#define TIMER_CTRL_ENABLE      (1)
-#define TIMER_CTRL_DISABLE     (0)
-
-#define TIMER_CLK_REG(val)     (0x04 + (0x10 * (val)))
-#define TIMER_CLK_SRC(val)     (((val) & 0x1) << 4)
-#define TIMER_CLK_SRC_SYS13M   (0)
-#define TIMER_CLK_SRC_RTC32K   (1)
-#define TIMER_CLK_DIV1         (0x0)
-#define TIMER_CLK_DIV2         (0x1)
-
-#define TIMER_CNT_REG(val)     (0x08 + (0x10 * (val)))
-#define TIMER_CMP_REG(val)     (0x0C + (0x10 * (val)))
-
-#define GPT_CLK_EVT    1
-#define GPT_CLK_SRC    2
-
-struct mtk_clock_event_device {
-       void __iomem *gpt_base;
-       u32 ticks_per_jiffy;
-       struct clock_event_device dev;
-};
-
-static void __iomem *gpt_sched_reg __read_mostly;
-
-static u64 notrace mtk_read_sched_clock(void)
-{
-       return readl_relaxed(gpt_sched_reg);
-}
-
-static inline struct mtk_clock_event_device *to_mtk_clk(
-                               struct clock_event_device *c)
-{
-       return container_of(c, struct mtk_clock_event_device, dev);
-}
-
-static void mtk_clkevt_time_stop(struct mtk_clock_event_device *evt, u8 timer)
-{
-       u32 val;
-
-       val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
-       writel(val & ~TIMER_CTRL_ENABLE, evt->gpt_base +
-                       TIMER_CTRL_REG(timer));
-}
-
-static void mtk_clkevt_time_setup(struct mtk_clock_event_device *evt,
-                               unsigned long delay, u8 timer)
-{
-       writel(delay, evt->gpt_base + TIMER_CMP_REG(timer));
-}
-
-static void mtk_clkevt_time_start(struct mtk_clock_event_device *evt,
-               bool periodic, u8 timer)
-{
-       u32 val;
-
-       /* Acknowledge interrupt */
-       writel(GPT_IRQ_ACK(timer), evt->gpt_base + GPT_IRQ_ACK_REG);
-
-       val = readl(evt->gpt_base + TIMER_CTRL_REG(timer));
-
-       /* Clear 2 bit timer operation mode field */
-       val &= ~TIMER_CTRL_OP(0x3);
-
-       if (periodic)
-               val |= TIMER_CTRL_OP(TIMER_CTRL_OP_REPEAT);
-       else
-               val |= TIMER_CTRL_OP(TIMER_CTRL_OP_ONESHOT);
-
-       writel(val | TIMER_CTRL_ENABLE | TIMER_CTRL_CLEAR,
-              evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static int mtk_clkevt_shutdown(struct clock_event_device *clk)
-{
-       mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT);
-       return 0;
-}
-
-static int mtk_clkevt_set_periodic(struct clock_event_device *clk)
-{
-       struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
-       mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
-       mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
-       mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
-       return 0;
-}
-
-static int mtk_clkevt_next_event(unsigned long event,
-                                  struct clock_event_device *clk)
-{
-       struct mtk_clock_event_device *evt = to_mtk_clk(clk);
-
-       mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
-       mtk_clkevt_time_setup(evt, event, GPT_CLK_EVT);
-       mtk_clkevt_time_start(evt, false, GPT_CLK_EVT);
-
-       return 0;
-}
-
-static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
-{
-       struct mtk_clock_event_device *evt = dev_id;
-
-       /* Acknowledge timer0 irq */
-       writel(GPT_IRQ_ACK(GPT_CLK_EVT), evt->gpt_base + GPT_IRQ_ACK_REG);
-       evt->dev.event_handler(&evt->dev);
-
-       return IRQ_HANDLED;
-}
-
-static void
-__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
-{
-       writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
-               evt->gpt_base + TIMER_CTRL_REG(timer));
-
-       writel(TIMER_CLK_SRC(TIMER_CLK_SRC_SYS13M) | TIMER_CLK_DIV1,
-                       evt->gpt_base + TIMER_CLK_REG(timer));
-
-       writel(0x0, evt->gpt_base + TIMER_CMP_REG(timer));
-
-       writel(TIMER_CTRL_OP(option) | TIMER_CTRL_ENABLE,
-                       evt->gpt_base + TIMER_CTRL_REG(timer));
-}
-
-static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
-{
-       u32 val;
-
-       /* Disable all interrupts */
-       writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-
-       /* Acknowledge all spurious pending interrupts */
-       writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-
-       val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
-       writel(val | GPT_IRQ_ENABLE(timer),
-                       evt->gpt_base + GPT_IRQ_EN_REG);
-}
-
-static int __init mtk_timer_init(struct device_node *node)
-{
-       struct mtk_clock_event_device *evt;
-       struct resource res;
-       unsigned long rate = 0;
-       struct clk *clk;
-
-       evt = kzalloc(sizeof(*evt), GFP_KERNEL);
-       if (!evt)
-               return -ENOMEM;
-
-       evt->dev.name = "mtk_tick";
-       evt->dev.rating = 300;
-       evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-       evt->dev.set_state_shutdown = mtk_clkevt_shutdown;
-       evt->dev.set_state_periodic = mtk_clkevt_set_periodic;
-       evt->dev.set_state_oneshot = mtk_clkevt_shutdown;
-       evt->dev.tick_resume = mtk_clkevt_shutdown;
-       evt->dev.set_next_event = mtk_clkevt_next_event;
-       evt->dev.cpumask = cpu_possible_mask;
-
-       evt->gpt_base = of_io_request_and_map(node, 0, "mtk-timer");
-       if (IS_ERR(evt->gpt_base)) {
-               pr_err("Can't get resource\n");
-               goto err_kzalloc;
-       }
-
-       evt->dev.irq = irq_of_parse_and_map(node, 0);
-       if (evt->dev.irq <= 0) {
-               pr_err("Can't parse IRQ\n");
-               goto err_mem;
-       }
-
-       clk = of_clk_get(node, 0);
-       if (IS_ERR(clk)) {
-               pr_err("Can't get timer clock\n");
-               goto err_irq;
-       }
-
-       if (clk_prepare_enable(clk)) {
-               pr_err("Can't prepare clock\n");
-               goto err_clk_put;
-       }
-       rate = clk_get_rate(clk);
-
-       if (request_irq(evt->dev.irq, mtk_timer_interrupt,
-                       IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
-               pr_err("failed to setup irq %d\n", evt->dev.irq);
-               goto err_clk_disable;
-       }
-
-       evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
-
-       /* Configure clock source */
-       mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
-       clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
-                       node->name, rate, 300, 32, clocksource_mmio_readl_up);
-       gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
-       sched_clock_register(mtk_read_sched_clock, 32, rate);
-
-       /* Configure clock event */
-       mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
-       clockevents_config_and_register(&evt->dev, rate, 0x3,
-                                       0xffffffff);
-
-       mtk_timer_enable_irq(evt, GPT_CLK_EVT);
-
-       return 0;
-
-err_clk_disable:
-       clk_disable_unprepare(clk);
-err_clk_put:
-       clk_put(clk);
-err_irq:
-       irq_dispose_mapping(evt->dev.irq);
-err_mem:
-       iounmap(evt->gpt_base);
-       of_address_to_resource(node, 0, &res);
-       release_mem_region(res.start, resource_size(&res));
-err_kzalloc:
-       kfree(evt);
-
-       return -EINVAL;
-}
-TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
index c337a8100a7b991988fa63cd31f0de96194803a2..dabf0a1035675faeeea358b35241627b406de39c 100644 (file)
@@ -230,7 +230,7 @@ static int __init tegra20_init_timer(struct device_node *np)
                return ret;
        }
 
-       tegra_clockevent.cpumask = cpu_all_mask;
+       tegra_clockevent.cpumask = cpu_possible_mask;
        tegra_clockevent.irq = tegra_timer_irq.irq;
        clockevents_config_and_register(&tegra_clockevent, 1000000,
                                        0x1, 0x1fffffff);
index 5e23d7b4a72200f7117283cce4cb47f003022e27..b4bd2f5b801d07f9209c27c5947e0cdd9ed1bd9e 100644 (file)
@@ -185,7 +185,7 @@ static struct timer_of to = {
                .set_state_oneshot = atcpit100_clkevt_set_oneshot,
                .tick_resume = atcpit100_clkevt_shutdown,
                .set_next_event = atcpit100_clkevt_next_event,
-               .cpumask = cpu_all_mask,
+               .cpumask = cpu_possible_mask,
        },
 
        .of_irq = {
index 0eee03250cfc87ef69b4e7744e7274b477071fcb..f5b2eda30bf336f79fd0b9553b7b6f1ef44f8dd4 100644 (file)
@@ -211,7 +211,7 @@ static int __init keystone_timer_init(struct device_node *np)
        event_dev->set_state_shutdown = keystone_shutdown;
        event_dev->set_state_periodic = keystone_set_periodic;
        event_dev->set_state_oneshot = keystone_shutdown;
-       event_dev->cpumask = cpu_all_mask;
+       event_dev->cpumask = cpu_possible_mask;
        event_dev->owner = THIS_MODULE;
        event_dev->name = TIMER_NAME;
        event_dev->irq = irq;
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
new file mode 100644 (file)
index 0000000..eb10321
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * Mediatek SoCs General-Purpose Timer handling.
+ *
+ * Copyright (C) 2014 Matthias Brugger
+ *
+ * Matthias Brugger <matthias.bgg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include "timer-of.h"
+
+#define TIMER_CLK_EVT           (1)
+#define TIMER_CLK_SRC           (2)
+
+#define TIMER_SYNC_TICKS        (3)
+
+/* gpt */
+#define GPT_IRQ_EN_REG          0x00
+#define GPT_IRQ_ENABLE(val)     BIT((val) - 1)
+#define GPT_IRQ_ACK_REG                0x08
+#define GPT_IRQ_ACK(val)        BIT((val) - 1)
+
+#define GPT_CTRL_REG(val)       (0x10 * (val))
+#define GPT_CTRL_OP(val)        (((val) & 0x3) << 4)
+#define GPT_CTRL_OP_ONESHOT     (0)
+#define GPT_CTRL_OP_REPEAT      (1)
+#define GPT_CTRL_OP_FREERUN     (3)
+#define GPT_CTRL_CLEAR          (2)
+#define GPT_CTRL_ENABLE         (1)
+#define GPT_CTRL_DISABLE        (0)
+
+#define GPT_CLK_REG(val)        (0x04 + (0x10 * (val)))
+#define GPT_CLK_SRC(val)        (((val) & 0x1) << 4)
+#define GPT_CLK_SRC_SYS13M      (0)
+#define GPT_CLK_SRC_RTC32K      (1)
+#define GPT_CLK_DIV1            (0x0)
+#define GPT_CLK_DIV2            (0x1)
+
+#define GPT_CNT_REG(val)        (0x08 + (0x10 * (val)))
+#define GPT_CMP_REG(val)        (0x0C + (0x10 * (val)))
+
+/* system timer */
+#define SYST_BASE               (0x40)
+
+#define SYST_CON                (SYST_BASE + 0x0)
+#define SYST_VAL                (SYST_BASE + 0x4)
+
+#define SYST_CON_REG(to)        (timer_of_base(to) + SYST_CON)
+#define SYST_VAL_REG(to)        (timer_of_base(to) + SYST_VAL)
+
+/*
+ * SYST_CON_EN: Clock enable. Shall be set to
+ *   - Start timer countdown.
+ *   - Allow timeout ticks being updated.
+ *   - Allow changing interrupt functions.
+ *
+ * SYST_CON_IRQ_EN: Set to allow interrupt.
+ *
+ * SYST_CON_IRQ_CLR: Set to clear interrupt.
+ */
+#define SYST_CON_EN              BIT(0)
+#define SYST_CON_IRQ_EN          BIT(1)
+#define SYST_CON_IRQ_CLR         BIT(4)
+
+static void __iomem *gpt_sched_reg __read_mostly;
+
+static void mtk_syst_ack_irq(struct timer_of *to)
+{
+       /* Clear and disable interrupt */
+       writel(SYST_CON_IRQ_CLR | SYST_CON_EN, SYST_CON_REG(to));
+}
+
+static irqreturn_t mtk_syst_handler(int irq, void *dev_id)
+{
+       struct clock_event_device *clkevt = dev_id;
+       struct timer_of *to = to_timer_of(clkevt);
+
+       mtk_syst_ack_irq(to);
+       clkevt->event_handler(clkevt);
+
+       return IRQ_HANDLED;
+}
+
+static int mtk_syst_clkevt_next_event(unsigned long ticks,
+                                     struct clock_event_device *clkevt)
+{
+       struct timer_of *to = to_timer_of(clkevt);
+
+       /* Enable clock to allow timeout tick update later */
+       writel(SYST_CON_EN, SYST_CON_REG(to));
+
+       /*
+        * Write new timeout ticks. Timer shall start countdown
+        * after timeout ticks are updated.
+        */
+       writel(ticks, SYST_VAL_REG(to));
+
+       /* Enable interrupt */
+       writel(SYST_CON_EN | SYST_CON_IRQ_EN, SYST_CON_REG(to));
+
+       return 0;
+}
+
+static int mtk_syst_clkevt_shutdown(struct clock_event_device *clkevt)
+{
+       /* Disable timer */
+       writel(0, SYST_CON_REG(to_timer_of(clkevt)));
+
+       return 0;
+}
+
+static int mtk_syst_clkevt_resume(struct clock_event_device *clkevt)
+{
+       return mtk_syst_clkevt_shutdown(clkevt);
+}
+
+static int mtk_syst_clkevt_oneshot(struct clock_event_device *clkevt)
+{
+       return 0;
+}
+
+static u64 notrace mtk_gpt_read_sched_clock(void)
+{
+       return readl_relaxed(gpt_sched_reg);
+}
+
+static void mtk_gpt_clkevt_time_stop(struct timer_of *to, u8 timer)
+{
+       u32 val;
+
+       val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+       writel(val & ~GPT_CTRL_ENABLE, timer_of_base(to) +
+              GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_setup(struct timer_of *to,
+                                     unsigned long delay, u8 timer)
+{
+       writel(delay, timer_of_base(to) + GPT_CMP_REG(timer));
+}
+
+static void mtk_gpt_clkevt_time_start(struct timer_of *to,
+                                     bool periodic, u8 timer)
+{
+       u32 val;
+
+       /* Acknowledge interrupt */
+       writel(GPT_IRQ_ACK(timer), timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+       val = readl(timer_of_base(to) + GPT_CTRL_REG(timer));
+
+       /* Clear 2 bit timer operation mode field */
+       val &= ~GPT_CTRL_OP(0x3);
+
+       if (periodic)
+               val |= GPT_CTRL_OP(GPT_CTRL_OP_REPEAT);
+       else
+               val |= GPT_CTRL_OP(GPT_CTRL_OP_ONESHOT);
+
+       writel(val | GPT_CTRL_ENABLE | GPT_CTRL_CLEAR,
+              timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static int mtk_gpt_clkevt_shutdown(struct clock_event_device *clk)
+{
+       mtk_gpt_clkevt_time_stop(to_timer_of(clk), TIMER_CLK_EVT);
+
+       return 0;
+}
+
+static int mtk_gpt_clkevt_set_periodic(struct clock_event_device *clk)
+{
+       struct timer_of *to = to_timer_of(clk);
+
+       mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+       mtk_gpt_clkevt_time_setup(to, to->of_clk.period, TIMER_CLK_EVT);
+       mtk_gpt_clkevt_time_start(to, true, TIMER_CLK_EVT);
+
+       return 0;
+}
+
+static int mtk_gpt_clkevt_next_event(unsigned long event,
+                                    struct clock_event_device *clk)
+{
+       struct timer_of *to = to_timer_of(clk);
+
+       mtk_gpt_clkevt_time_stop(to, TIMER_CLK_EVT);
+       mtk_gpt_clkevt_time_setup(to, event, TIMER_CLK_EVT);
+       mtk_gpt_clkevt_time_start(to, false, TIMER_CLK_EVT);
+
+       return 0;
+}
+
+static irqreturn_t mtk_gpt_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+       struct timer_of *to = to_timer_of(clkevt);
+
+       /* Acknowledge timer0 irq */
+       writel(GPT_IRQ_ACK(TIMER_CLK_EVT), timer_of_base(to) + GPT_IRQ_ACK_REG);
+       clkevt->event_handler(clkevt);
+
+       return IRQ_HANDLED;
+}
+
+static void
+__init mtk_gpt_setup(struct timer_of *to, u8 timer, u8 option)
+{
+       writel(GPT_CTRL_CLEAR | GPT_CTRL_DISABLE,
+              timer_of_base(to) + GPT_CTRL_REG(timer));
+
+       writel(GPT_CLK_SRC(GPT_CLK_SRC_SYS13M) | GPT_CLK_DIV1,
+              timer_of_base(to) + GPT_CLK_REG(timer));
+
+       writel(0x0, timer_of_base(to) + GPT_CMP_REG(timer));
+
+       writel(GPT_CTRL_OP(option) | GPT_CTRL_ENABLE,
+              timer_of_base(to) + GPT_CTRL_REG(timer));
+}
+
+static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
+{
+       u32 val;
+
+       /* Disable all interrupts */
+       writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
+
+       /* Acknowledge all spurious pending interrupts */
+       writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
+
+       val = readl(timer_of_base(to) + GPT_IRQ_EN_REG);
+       writel(val | GPT_IRQ_ENABLE(timer),
+              timer_of_base(to) + GPT_IRQ_EN_REG);
+}
+
+static struct timer_of to = {
+       .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+       .clkevt = {
+               .name = "mtk-clkevt",
+               .rating = 300,
+               .cpumask = cpu_possible_mask,
+       },
+
+       .of_irq = {
+               .flags = IRQF_TIMER | IRQF_IRQPOLL,
+       },
+};
+
+static int __init mtk_syst_init(struct device_node *node)
+{
+       int ret;
+
+       to.clkevt.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT;
+       to.clkevt.set_state_shutdown = mtk_syst_clkevt_shutdown;
+       to.clkevt.set_state_oneshot = mtk_syst_clkevt_oneshot;
+       to.clkevt.tick_resume = mtk_syst_clkevt_resume;
+       to.clkevt.set_next_event = mtk_syst_clkevt_next_event;
+       to.of_irq.handler = mtk_syst_handler;
+
+       ret = timer_of_init(node, &to);
+       if (ret)
+               goto err;
+
+       clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+                                       TIMER_SYNC_TICKS, 0xffffffff);
+
+       return 0;
+err:
+       timer_of_cleanup(&to);
+       return ret;
+}
+
+static int __init mtk_gpt_init(struct device_node *node)
+{
+       int ret;
+
+       to.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+       to.clkevt.set_state_shutdown = mtk_gpt_clkevt_shutdown;
+       to.clkevt.set_state_periodic = mtk_gpt_clkevt_set_periodic;
+       to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
+       to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
+       to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
+       to.of_irq.handler = mtk_gpt_interrupt;
+
+       ret = timer_of_init(node, &to);
+       if (ret)
+               goto err;
+
+       /* Configure clock source */
+       mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
+       clocksource_mmio_init(timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC),
+                             node->name, timer_of_rate(&to), 300, 32,
+                             clocksource_mmio_readl_up);
+       gpt_sched_reg = timer_of_base(&to) + GPT_CNT_REG(TIMER_CLK_SRC);
+       sched_clock_register(mtk_gpt_read_sched_clock, 32, timer_of_rate(&to));
+
+       /* Configure clock event */
+       mtk_gpt_setup(&to, TIMER_CLK_EVT, GPT_CTRL_OP_REPEAT);
+       clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+                                       TIMER_SYNC_TICKS, 0xffffffff);
+
+       mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
+
+       return 0;
+err:
+       timer_of_cleanup(&to);
+       return ret;
+}
+TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
+TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
index ef9ebeafb3edf0d8b41d78413cbfbc0db3b6a322..430cb99d8d79b4894c1c219821d6fbdb37bf6e24 100644 (file)
@@ -156,4 +156,54 @@ static int __init sprd_timer_init(struct device_node *np)
        return 0;
 }
 
+static struct timer_of suspend_to = {
+       .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
+static u64 sprd_suspend_timer_read(struct clocksource *cs)
+{
+       return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
+                                  TIMER_VALUE_SHDW_LO) & cs->mask;
+}
+
+static int sprd_suspend_timer_enable(struct clocksource *cs)
+{
+       sprd_timer_update_counter(timer_of_base(&suspend_to),
+                                 TIMER_VALUE_LO_MASK);
+       sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+
+       return 0;
+}
+
+static void sprd_suspend_timer_disable(struct clocksource *cs)
+{
+       sprd_timer_disable(timer_of_base(&suspend_to));
+}
+
+static struct clocksource suspend_clocksource = {
+       .name   = "sprd_suspend_timer",
+       .rating = 200,
+       .read   = sprd_suspend_timer_read,
+       .enable = sprd_suspend_timer_enable,
+       .disable = sprd_suspend_timer_disable,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
+
+static int __init sprd_suspend_timer_init(struct device_node *np)
+{
+       int ret;
+
+       ret = timer_of_init(np, &suspend_to);
+       if (ret)
+               return ret;
+
+       clocksource_register_hz(&suspend_clocksource,
+                               timer_of_rate(&suspend_to));
+
+       return 0;
+}
+
 TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
+TIMER_OF_DECLARE(sc9860_persistent_timer, "sprd,sc9860-suspend-timer",
+                sprd_suspend_timer_init);
index 880a861ab3c82dd1709b4accc9d9200593ea9ffa..29e2e1a78a43372ee96e64bb9b93d6b21b5288f7 100644 (file)
@@ -78,8 +78,7 @@ static struct ti_32k ti_32k_timer = {
                .rating         = 250,
                .read           = ti_32k_read_cycles,
                .mask           = CLOCKSOURCE_MASK(32),
-               .flags          = CLOCK_SOURCE_IS_CONTINUOUS |
-                               CLOCK_SOURCE_SUSPEND_NONSTOP,
+               .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        },
 };
 
index a6a0338eea77f73fbae0b549e3ec77265c00b2f4..f74689334f7cb729f4f9f1840401f97fabda5819 100644 (file)
@@ -162,7 +162,7 @@ static int __init zevio_timer_add(struct device_node *node)
                timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
                timer->clkevt.tick_resume       = zevio_timer_set_oneshot;
                timer->clkevt.rating            = 200;
-               timer->clkevt.cpumask           = cpu_all_mask;
+               timer->clkevt.cpumask           = cpu_possible_mask;
                timer->clkevt.features          = CLOCK_EVT_FEAT_ONESHOT;
                timer->clkevt.irq               = irqnr;
 
index cdad49da3ff710e6fd2cc1adf4bf4877623af670..38c695ce385bb91fc0185cb92132b103a7b2c2d5 100644 (file)
@@ -533,8 +533,8 @@ static int do_timerfd_gettime(int ufd, struct itimerspec64 *t)
 }
 
 SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
-               const struct itimerspec __user *, utmr,
-               struct itimerspec __user *, otmr)
+               const struct __kernel_itimerspec __user *, utmr,
+               struct __kernel_itimerspec __user *, otmr)
 {
        struct itimerspec64 new, old;
        int ret;
@@ -550,7 +550,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
        return ret;
 }
 
-SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
+SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *, otmr)
 {
        struct itimerspec64 kotmr;
        int ret = do_timerfd_gettime(ufd, &kotmr);
@@ -559,7 +559,7 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
        return put_itimerspec64(&kotmr, otmr) ? -EFAULT : 0;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
 COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
                const struct compat_itimerspec __user *, utmr,
                struct compat_itimerspec __user *, otmr)
index 7dff1963c185c9d2c85da84f5e89f96cc5a2b3b5..308918928767ad5921b17b6fe16953dfdc0f6d93 100644 (file)
@@ -194,6 +194,9 @@ extern void clocksource_suspend(void);
 extern void clocksource_resume(void);
 extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
 
 extern u64
 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
index c68acc47da57b6a7bef7b8ef84a9c897d4b83ce6..df45ee8413d6d20e0741df750a6fab8fa3196ece 100644 (file)
@@ -115,11 +115,6 @@ typedef    compat_ulong_t          compat_aio_context_t;
 struct compat_sel_arg_struct;
 struct rusage;
 
-struct compat_itimerspec {
-       struct compat_timespec it_interval;
-       struct compat_timespec it_value;
-};
-
 struct compat_utimbuf {
        compat_time_t           actime;
        compat_time_t           modtime;
@@ -300,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
 extern int compat_put_timespec(const struct timespec *, void __user *);
 extern int compat_get_timeval(struct timeval *, const void __user *);
 extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
-                       const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
-                       struct compat_itimerspec __user *uits);
 
 struct compat_iovec {
        compat_uptr_t   iov_base;
index 31f2774f199464f9f7597f8603387f42dd3b1687..e70bfd1d2c3fe6f0a57e829911afa5af07faa1aa 100644 (file)
@@ -17,7 +17,16 @@ struct compat_timeval {
        s32             tv_usec;
 };
 
+struct compat_itimerspec {
+       struct compat_timespec it_interval;
+       struct compat_timespec it_value;
+};
+
 extern int compat_get_timespec64(struct timespec64 *, const void __user *);
 extern int compat_put_timespec64(const struct timespec64 *, void __user *);
+extern int get_compat_itimerspec64(struct itimerspec64 *its,
+                       const struct compat_itimerspec __user *uits);
+extern int put_compat_itimerspec64(const struct itimerspec64 *its,
+                       struct compat_itimerspec __user *uits);
 
 #endif /* _LINUX_COMPAT_TIME_H */
index 5b9fddbaac4166b11121f75a3f3b4db7af1aac4c..b2bb44f87f5a3edb6ee6f179c83fcde42a363fe5 100644 (file)
@@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
 /* Map the ktime_t to timeval conversion to ns_to_timeval function */
 #define ktime_to_timeval(kt)           ns_to_timeval((kt))
 
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt)                        (kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+       return kt;
+}
 
 /**
  * ktime_compare - Compares two ktime_t variables for less, greater or equal
index c85704fcdbd2189b517f407ad871b02055924df5..ee7e987ea1b4354ef93a149c997caf75b6269c1b 100644 (file)
@@ -95,8 +95,8 @@ struct k_itimer {
        clockid_t               it_clock;
        timer_t                 it_id;
        int                     it_active;
-       int                     it_overrun;
-       int                     it_overrun_last;
+       s64                     it_overrun;
+       s64                     it_overrun_last;
        int                     it_requeue_pending;
        int                     it_sigev_notify;
        ktime_t                 it_interval;
index 5c1a0933768ee3202f360164d3defd1f3a81f5de..ebb2f24027e8bd86dd1ac1e9f04a023bcf2838be 100644 (file)
@@ -506,9 +506,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
 /* fs/timerfd.c */
 asmlinkage long sys_timerfd_create(int clockid, int flags);
 asmlinkage long sys_timerfd_settime(int ufd, int flags,
-                                   const struct itimerspec __user *utmr,
-                                   struct itimerspec __user *otmr);
-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
+                                   const struct __kernel_itimerspec __user *utmr,
+                                   struct __kernel_itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr);
 
 /* fs/utimes.c */
 asmlinkage long sys_utimensat(int dfd, const char __user *filename,
@@ -573,10 +573,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock,
                                 struct sigevent __user *timer_event_spec,
                                 timer_t __user * created_timer_id);
 asmlinkage long sys_timer_gettime(timer_t timer_id,
-                               struct itimerspec __user *setting);
+                               struct __kernel_itimerspec __user *setting);
 asmlinkage long sys_timer_getoverrun(timer_t timer_id);
 asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
-                               const struct itimerspec __user *new_setting,
+                               const struct __kernel_itimerspec __user *new_setting,
                                struct itimerspec __user *old_setting);
 asmlinkage long sys_timer_delete(timer_t timer_id);
 asmlinkage long sys_clock_settime(clockid_t which_clock,
index aed74463592d8121f2a38c83ad15602aa1af252f..27d83fd2ae6146a615fec51f9fa01660f86fa13d 100644 (file)
@@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts,
 int put_timespec64(const struct timespec64 *ts,
                struct __kernel_timespec __user *uts);
 int get_itimerspec64(struct itimerspec64 *it,
-                       const struct itimerspec __user *uit);
+                       const struct __kernel_itimerspec __user *uit);
 int put_itimerspec64(const struct itimerspec64 *it,
-                       struct itimerspec __user *uit);
+                       struct __kernel_itimerspec __user *uit);
 
 extern time64_t mktime64(const unsigned int year, const unsigned int mon,
                        const unsigned int day, const unsigned int hour,
index 0a7b2f79cec7df617525f6745a2109c8a662e7c4..05634afba0db62f0d4bebf743fed5daf7ce2ffe1 100644 (file)
@@ -12,6 +12,7 @@ typedef __u64 timeu64_t;
  */
 #ifndef CONFIG_64BIT_TIME
 #define __kernel_timespec timespec
+#define __kernel_itimerspec itimerspec
 #endif
 
 #include <uapi/linux/time.h>
index 86bc2026efcea4fd9af338eb19cb63bcea3a9547..edace6b656e9f2752692621d209c7e7518951700 100644 (file)
@@ -177,7 +177,7 @@ static inline time64_t ktime_get_clocktai_seconds(void)
 extern bool timekeeping_rtc_skipsuspend(void);
 extern bool timekeeping_rtc_skipresume(void);
 
-extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
+extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
 
 /*
  * struct system_time_snapshot - simultaneous raw/real time capture with
index fcf9366564936d5026c6e27cc56804debfb8eefb..6b56a2208be7b51032533db95bf26faf3032ee55 100644 (file)
@@ -49,6 +49,13 @@ struct __kernel_timespec {
 };
 #endif
 
+#ifndef __kernel_itimerspec
+struct __kernel_itimerspec {
+       struct __kernel_timespec it_interval;    /* timer period */
+       struct __kernel_timespec it_value;       /* timer expiration */
+};
+#endif
+
 /*
  * legacy timeval structure, only embedded in structures that
  * traditionally used 'timeval' to pass time intervals (not absolute
index 702aa846ddacabde1fdf4c0e29c491f5c9c10d00..8e40efc2928a113231fe09c9baaa99c2d77db728 100644 (file)
@@ -324,35 +324,6 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t,  pid, unsigned int, len,
        return ret;
 }
 
-/* Todo: Delete these extern declarations when get/put_compat_itimerspec64()
- * are moved to kernel/time/time.c .
- */
-extern int __compat_get_timespec64(struct timespec64 *ts64,
-                                  const struct compat_timespec __user *cts);
-extern int __compat_put_timespec64(const struct timespec64 *ts64,
-                                  struct compat_timespec __user *cts);
-
-int get_compat_itimerspec64(struct itimerspec64 *its,
-                       const struct compat_itimerspec __user *uits)
-{
-
-       if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
-           __compat_get_timespec64(&its->it_value, &uits->it_value))
-               return -EFAULT;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
-
-int put_compat_itimerspec64(const struct itimerspec64 *its,
-                       struct compat_itimerspec __user *uits)
-{
-       if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
-           __compat_put_timespec64(&its->it_value, &uits->it_value))
-               return -EFAULT;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
-
 /*
  * We currently only need the following fields from the sigevent
  * structure: sigev_value, sigev_signo, sig_notify and (sometimes
index 38509dc1f77b0916cc7633f6814d86549ea62a40..e27b51d3facdb5c617b4eecbaa2ff030dbf22acd 100644 (file)
@@ -2512,11 +2512,11 @@ static int do_sysinfo(struct sysinfo *info)
 {
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
-       struct timespec tp;
+       struct timespec64 tp;
 
        memset(info, 0, sizeof(struct sysinfo));
 
-       get_monotonic_boottime(&tp);
+       ktime_get_boottime_ts64(&tp);
        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
index 639321bf2e397934b1b9ec53c241c81bf9c074d1..fa5de5e8de61d88d266cd2651dedd2090ad89284 100644 (file)
@@ -581,11 +581,11 @@ static void alarm_timer_rearm(struct k_itimer *timr)
  * @timr:      Pointer to the posixtimer data struct
  * @now:       Current time to forward the timer against
  */
-static int alarm_timer_forward(struct k_itimer *timr, ktime_t now)
+static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now)
 {
        struct alarm *alarm = &timr->it.alarm.alarmtimer;
 
-       return (int) alarm_forward(alarm, timr->it_interval, now);
+       return alarm_forward(alarm, timr->it_interval, now);
 }
 
 /**
@@ -808,7 +808,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        /* Convert (if necessary) to absolute time */
        if (flags != TIMER_ABSTIME) {
                ktime_t now = alarm_bases[type].gettime();
-               exp = ktime_add(now, exp);
+
+               exp = ktime_add_safe(now, exp);
        }
 
        ret = alarmtimer_do_nsleep(&alarm, exp, type);
index 16c027e9cc730a38d1bb97da89dd9f30c4557145..8c0e4092f661824edf50a85342284d9ddcb03e6b 100644 (file)
@@ -463,6 +463,12 @@ void clockevents_register_device(struct clock_event_device *dev)
                dev->cpumask = cpumask_of(smp_processor_id());
        }
 
+       if (dev->cpumask == cpu_all_mask) {
+               WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
+                    dev->name);
+               dev->cpumask = cpu_possible_mask;
+       }
+
        raw_spin_lock_irqsave(&clockevents_lock, flags);
 
        list_add(&dev->list, &clockevent_devices);
index f89a78e2792b645ea8b8abc540988569c560799b..f74fb00d806444739f9d8ee1611a98c694325f95 100644 (file)
@@ -94,6 +94,8 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
 /*[Clocksource internal variables]---------
  * curr_clocksource:
  *     currently selected clocksource.
+ * suspend_clocksource:
+ *     used to calculate the suspend time.
  * clocksource_list:
  *     linked list with the registered clocksources
  * clocksource_mutex:
@@ -102,10 +104,12 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
  *     Name of the user-specified clocksource.
  */
 static struct clocksource *curr_clocksource;
+static struct clocksource *suspend_clocksource;
 static LIST_HEAD(clocksource_list);
 static DEFINE_MUTEX(clocksource_mutex);
 static char override_name[CS_NAME_LEN];
 static int finished_booting;
+static u64 suspend_start;
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static void clocksource_watchdog_work(struct work_struct *work);
@@ -447,6 +451,140 @@ static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
 
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
+static bool clocksource_is_suspend(struct clocksource *cs)
+{
+       return cs == suspend_clocksource;
+}
+
+static void __clocksource_suspend_select(struct clocksource *cs)
+{
+       /*
+        * Skip the clocksource which will be stopped in suspend state.
+        */
+       if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
+               return;
+
+       /*
+        * The nonstop clocksource can be selected as the suspend clocksource to
+        * calculate the suspend time, so it should not supply suspend/resume
+        * interfaces to suspend the nonstop clocksource when system suspends.
+        */
+       if (cs->suspend || cs->resume) {
+               pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
+                       cs->name);
+       }
+
+       /* Pick the best rating. */
+       if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
+               suspend_clocksource = cs;
+}
+
+/**
+ * clocksource_suspend_select - Select the best clocksource for suspend timing
+ * @fallback:  if select a fallback clocksource
+ */
+static void clocksource_suspend_select(bool fallback)
+{
+       struct clocksource *cs, *old_suspend;
+
+       old_suspend = suspend_clocksource;
+       if (fallback)
+               suspend_clocksource = NULL;
+
+       list_for_each_entry(cs, &clocksource_list, list) {
+               /* Skip current if we were requested for a fallback. */
+               if (fallback && cs == old_suspend)
+                       continue;
+
+               __clocksource_suspend_select(cs);
+       }
+}
+
+/**
+ * clocksource_start_suspend_timing - Start measuring the suspend timing
+ * @cs:                        current clocksource from timekeeping
+ * @start_cycles:      current cycles from timekeeping
+ *
+ * This function will save the start cycle values of suspend timer to calculate
+ * the suspend time when resuming system.
+ *
+ * This function is called late in the suspend process from timekeeping_suspend(),
+ * that means processes are freezed, non-boot cpus and interrupts are disabled
+ * now. It is therefore possible to start the suspend timer without taking the
+ * clocksource mutex.
+ */
+void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
+{
+       if (!suspend_clocksource)
+               return;
+
+       /*
+        * If current clocksource is the suspend timer, we should use the
+        * tkr_mono.cycle_last value as suspend_start to avoid same reading
+        * from suspend timer.
+        */
+       if (clocksource_is_suspend(cs)) {
+               suspend_start = start_cycles;
+               return;
+       }
+
+       if (suspend_clocksource->enable &&
+           suspend_clocksource->enable(suspend_clocksource)) {
+               pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
+               return;
+       }
+
+       suspend_start = suspend_clocksource->read(suspend_clocksource);
+}
+
+/**
+ * clocksource_stop_suspend_timing - Stop measuring the suspend timing
+ * @cs:                current clocksource from timekeeping
+ * @cycle_now: current cycles from timekeeping
+ *
+ * This function will calculate the suspend time from suspend timer.
+ *
+ * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
+ *
+ * This function is called early in the resume process from timekeeping_resume(),
+ * that means there is only one cpu, no processes are running and the interrupts
+ * are disabled. It is therefore possible to stop the suspend timer without
+ * taking the clocksource mutex.
+ */
+u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+{
+       u64 now, delta, nsec = 0;
+
+       if (!suspend_clocksource)
+               return 0;
+
+       /*
+        * If current clocksource is the suspend timer, we should use the
+        * tkr_mono.cycle_last value from timekeeping as current cycle to
+        * avoid same reading from suspend timer.
+        */
+       if (clocksource_is_suspend(cs))
+               now = cycle_now;
+       else
+               now = suspend_clocksource->read(suspend_clocksource);
+
+       if (now > suspend_start) {
+               delta = clocksource_delta(now, suspend_start,
+                                         suspend_clocksource->mask);
+               nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
+                                      suspend_clocksource->shift);
+       }
+
+       /*
+        * Disable the suspend timer to save power if current clocksource is
+        * not the suspend timer.
+        */
+       if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
+               suspend_clocksource->disable(suspend_clocksource);
+
+       return nsec;
+}
+
 /**
  * clocksource_suspend - suspend the clocksource(s)
  */
@@ -792,6 +930,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 
        clocksource_select();
        clocksource_select_watchdog(false);
+       __clocksource_suspend_select(cs);
        mutex_unlock(&clocksource_mutex);
        return 0;
 }
@@ -820,6 +959,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 
        clocksource_select();
        clocksource_select_watchdog(false);
+       clocksource_suspend_select(false);
        mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
@@ -845,6 +985,15 @@ static int clocksource_unbind(struct clocksource *cs)
                        return -EBUSY;
        }
 
+       if (clocksource_is_suspend(cs)) {
+               /*
+                * Select and try to install a replacement suspend clocksource.
+                * If no replacement suspend clocksource, we will just let the
+                * clocksource go and have no suspend clocksource.
+                */
+               clocksource_suspend_select(true);
+       }
+
        clocksource_watchdog_lock(&flags);
        clocksource_dequeue_watchdog(cs);
        list_del_init(&cs->list);
index 3e93c54bd3a16b7fc282a20064f5d75f7c812ee8..e1a549c9e39918303d359c036d532f5ec22ed682 100644 (file)
@@ -718,8 +718,8 @@ static void hrtimer_switch_to_hres(void)
        struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
        if (tick_init_highres()) {
-               printk(KERN_WARNING "Could not switch to high resolution "
-                                   "mode on CPU %d\n", base->cpu);
+               pr_warn("Could not switch to high resolution mode on CPU %u\n",
+                       base->cpu);
                return;
        }
        base->hres_active = 1;
@@ -1573,8 +1573,7 @@ retry:
        else
                expires_next = ktime_add(now, delta);
        tick_program_event(expires_next, 1);
-       printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
-                   ktime_to_ns(delta));
+       pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
 }
 
 /* called with interrupts disabled */
index a09ded765f6c50f6c0d9ba46c30644b539d1472a..c5e0cba3b39cc12c5b6ef2f12c04395123e999c6 100644 (file)
@@ -502,7 +502,7 @@ static void sched_sync_hw_clock(struct timespec64 now,
 {
        struct timespec64 next;
 
-       getnstimeofday64(&next);
+       ktime_get_real_ts64(&next);
        if (!fail)
                next.tv_sec = 659;
        else {
@@ -537,7 +537,7 @@ static void sync_rtc_clock(void)
        if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
                return;
 
-       getnstimeofday64(&now);
+       ktime_get_real_ts64(&now);
 
        adjust = now;
        if (persistent_clock_is_local)
@@ -591,7 +591,7 @@ static bool sync_cmos_clock(void)
         * Architectures are strongly encouraged to use rtclib and not
         * implement this legacy API.
         */
-       getnstimeofday64(&now);
+       ktime_get_real_ts64(&now);
        if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
                if (persistent_clock_is_local)
                        adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
@@ -642,7 +642,7 @@ void ntp_notify_cmos_timer(void)
 /*
  * Propagate a new txc->status value into the NTP state:
  */
-static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
+static inline void process_adj_status(const struct timex *txc)
 {
        if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
                time_state = TIME_OK;
@@ -665,12 +665,10 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
 }
 
 
-static inline void process_adjtimex_modes(struct timex *txc,
-                                               struct timespec64 *ts,
-                                               s32 *time_tai)
+static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai)
 {
        if (txc->modes & ADJ_STATUS)
-               process_adj_status(txc, ts);
+               process_adj_status(txc);
 
        if (txc->modes & ADJ_NANO)
                time_status |= STA_NANO;
@@ -718,7 +716,7 @@ static inline void process_adjtimex_modes(struct timex *txc,
  * adjtimex mainly allows reading (and writing, if superuser) of
  * kernel time-keeping variables. used by xntpd.
  */
-int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
+int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai)
 {
        int result;
 
@@ -735,7 +733,7 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
 
                /* If there are input parameters, then process them: */
                if (txc->modes)
-                       process_adjtimex_modes(txc, ts, time_tai);
+                       process_adjtimex_modes(txc, time_tai);
 
                txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
                                  NTP_SCALE_SHIFT);
@@ -1022,12 +1020,11 @@ void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_t
 
 static int __init ntp_tick_adj_setup(char *str)
 {
-       int rc = kstrtol(str, 0, (long *)&ntp_tick_adj);
-
+       int rc = kstrtos64(str, 0, &ntp_tick_adj);
        if (rc)
                return rc;
-       ntp_tick_adj <<= NTP_SCALE_SHIFT;
 
+       ntp_tick_adj <<= NTP_SCALE_SHIFT;
        return 1;
 }
 
index 909bd1f1bfb14dba87f6dc82fc85009108cabcdc..c24b0e13f0111de1d1febbf676a8724fcd6cd042 100644 (file)
@@ -8,6 +8,6 @@ extern void ntp_clear(void);
 extern u64 ntp_tick_length(void);
 extern ktime_t ntp_get_next_leap(void);
 extern int second_overflow(time64_t secs);
-extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
-extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
+extern int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai);
+extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
 #endif /* _LINUX_NTP_INTERNAL_H */
index 9cdf54b04ca8860b7aa2eec2e3625de076b5f7e2..294d7b65af33638bd978d279bf4fa53448efd7a8 100644 (file)
@@ -85,7 +85,7 @@ static void bump_cpu_timer(struct k_itimer *timer, u64 now)
                        continue;
 
                timer->it.cpu.expires += incr;
-               timer->it_overrun += 1 << i;
+               timer->it_overrun += 1LL << i;
                delta -= incr;
        }
 }
index 26aa9569e24a54a6060584a5931ce36748f36d83..2c6847d5d69bae179291f1a6259f611e469dfb8c 100644 (file)
@@ -81,7 +81,7 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
                ktime_get_ts64(tp);
                break;
        case CLOCK_BOOTTIME:
-               get_monotonic_boottime64(tp);
+               ktime_get_boottime_ts64(tp);
                break;
        default:
                return -EINVAL;
index e08ce3f27447390846394e55a11cd4e9c029bb37..3ac7295306dcdf3ccd32547159b1fded304acbc4 100644 (file)
@@ -228,21 +228,21 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
  */
 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
 {
-       getrawmonotonic64(tp);
+       ktime_get_raw_ts64(tp);
        return 0;
 }
 
 
 static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
 {
-       *tp = current_kernel_time64();
+       ktime_get_coarse_real_ts64(tp);
        return 0;
 }
 
 static int posix_get_monotonic_coarse(clockid_t which_clock,
                                                struct timespec64 *tp)
 {
-       *tp = get_monotonic_coarse64();
+       ktime_get_coarse_ts64(tp);
        return 0;
 }
 
@@ -254,13 +254,13 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
 
 static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
 {
-       get_monotonic_boottime64(tp);
+       ktime_get_boottime_ts64(tp);
        return 0;
 }
 
 static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
 {
-       timekeeping_clocktai64(tp);
+       ktime_get_clocktai_ts64(tp);
        return 0;
 }
 
@@ -283,6 +283,17 @@ static __init int init_posix_timers(void)
 }
 __initcall(init_posix_timers);
 
+/*
+ * The siginfo si_overrun field and the return value of timer_getoverrun(2)
+ * are of type int. Clamp the overrun value to INT_MAX
+ */
+static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
+{
+       s64 sum = timr->it_overrun_last + (s64)baseval;
+
+       return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
+}
+
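
timer_overrun_to_int() saturates the 64-bit overrun accumulator before it is
reported through int-sized interfaces (si_overrun and timer_getoverrun(2)).
A standalone sketch of the same clamp, with names that are not the kernel's:

/* sketch: fold a 64-bit overrun count into an int, saturating at INT_MAX */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int overrun_to_int(int64_t overrun_last, int baseval)
{
	int64_t sum = overrun_last + (int64_t)baseval;

	return sum > (int64_t)INT_MAX ? INT_MAX : (int)sum;
}

int main(void)
{
	printf("%d\n", overrun_to_int(10, 5));			/* 15 */
	printf("%d\n", overrun_to_int(INT64_C(1) << 40, 0));	/* INT_MAX */
	return 0;
}
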
 static void common_hrtimer_rearm(struct k_itimer *timr)
 {
        struct hrtimer *timer = &timr->it.real.timer;
@@ -290,9 +301,8 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
        if (!timr->it_interval)
                return;
 
-       timr->it_overrun += (unsigned int) hrtimer_forward(timer,
-                                               timer->base->get_time(),
-                                               timr->it_interval);
+       timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
+                                           timr->it_interval);
        hrtimer_restart(timer);
 }
 
@@ -321,10 +331,10 @@ void posixtimer_rearm(struct siginfo *info)
 
                timr->it_active = 1;
                timr->it_overrun_last = timr->it_overrun;
-               timr->it_overrun = -1;
+               timr->it_overrun = -1LL;
                ++timr->it_requeue_pending;
 
-               info->si_overrun += timr->it_overrun_last;
+               info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
        }
 
        unlock_timer(timr, flags);
@@ -418,9 +428,8 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
                                        now = ktime_add(now, kj);
                        }
 #endif
-                       timr->it_overrun += (unsigned int)
-                               hrtimer_forward(timer, now,
-                                               timr->it_interval);
+                       timr->it_overrun += hrtimer_forward(timer, now,
+                                                           timr->it_interval);
                        ret = HRTIMER_RESTART;
                        ++timr->it_requeue_pending;
                        timr->it_active = 1;
@@ -524,7 +533,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
        new_timer->it_id = (timer_t) new_timer_id;
        new_timer->it_clock = which_clock;
        new_timer->kclock = kc;
-       new_timer->it_overrun = -1;
+       new_timer->it_overrun = -1LL;
 
        if (event) {
                rcu_read_lock();
@@ -645,11 +654,11 @@ static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
        return __hrtimer_expires_remaining_adjusted(timer, now);
 }
 
-static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
+static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
 {
        struct hrtimer *timer = &timr->it.real.timer;
 
-       return (int)hrtimer_forward(timer, now, timr->it_interval);
+       return hrtimer_forward(timer, now, timr->it_interval);
 }
 
 /*
@@ -743,7 +752,7 @@ static int do_timer_gettime(timer_t timer_id,  struct itimerspec64 *setting)
 
 /* Get the time remaining on a POSIX.1b interval timer. */
 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
-               struct itimerspec __user *, setting)
+               struct __kernel_itimerspec __user *, setting)
 {
        struct itimerspec64 cur_setting;
 
@@ -755,7 +764,8 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
        return ret;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
+
 COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
                       struct compat_itimerspec __user *, setting)
 {
@@ -768,6 +778,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
        }
        return ret;
 }
+
 #endif
 
 /*
@@ -789,7 +800,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
        if (!timr)
                return -EINVAL;
 
-       overrun = timr->it_overrun_last;
+       overrun = timer_overrun_to_int(timr, 0);
        unlock_timer(timr, flags);
 
        return overrun;
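
timer_getoverrun(2) and siginfo::si_overrun now report this clamped value. A
small userspace check is sketched below; it assumes the usual POSIX timer API
(link with -lrt on older glibc) and the printed numbers depend on scheduling:

/* arm a 1ms periodic timer, let it overrun, then read the overrun count */
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct sigevent sev = { 0 };
	struct itimerspec its = { 0 };
	siginfo_t info;
	sigset_t set;
	timer_t tid;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* collect the signal synchronously */

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	timer_create(CLOCK_MONOTONIC, &sev, &tid);

	its.it_value.tv_nsec = 1000000;		/* first expiry after 1ms */
	its.it_interval.tv_nsec = 1000000;	/* then every 1ms */
	timer_settime(tid, 0, &its, NULL);

	sleep(1);				/* many intervals expire meanwhile */
	sigwaitinfo(&set, &info);
	printf("si_overrun=%d timer_getoverrun=%d\n",
	       info.si_overrun, timer_getoverrun(tid));
	timer_delete(tid);
	return 0;
}
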
@@ -906,8 +917,8 @@ retry:
 
 /* Set a POSIX.1b interval timer */
 SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
-               const struct itimerspec __user *, new_setting,
-               struct itimerspec __user *, old_setting)
+               const struct __kernel_itimerspec __user *, new_setting,
+               struct __kernel_itimerspec __user *, old_setting)
 {
        struct itimerspec64 new_spec, old_spec;
        struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
@@ -927,7 +938,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
        return error;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
 COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
                       struct compat_itimerspec __user *, new,
                       struct compat_itimerspec __user *, old)
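
timer_gettime()/timer_settime() now take struct __kernel_itimerspec, whose
timespec members carry 64-bit seconds on every ABI, and the compat entry
points are gated on CONFIG_COMPAT_32BIT_TIME rather than CONFIG_COMPAT. The
sketch below mirrors the UAPI types as of this series; treat the installed
include/uapi/linux/time.h as authoritative:

/* 64-bit-time UAPI layout (sketch; consult the real headers) */
typedef long long __kernel_time64_t;

struct __kernel_timespec {
	__kernel_time64_t	tv_sec;		/* seconds, 64-bit on all ABIs */
	long long		tv_nsec;	/* nanoseconds */
};

struct __kernel_itimerspec {
	struct __kernel_timespec it_interval;	/* timer period */
	struct __kernel_timespec it_value;	/* next expiration */
};
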
index 151e28f5bf304ce134897038639c53998adae4d8..ddb21145211a0280f9b20667a33b9503ce62d776 100644 (file)
@@ -19,7 +19,7 @@ struct k_clock {
        void    (*timer_get)(struct k_itimer *timr,
                             struct itimerspec64 *cur_setting);
        void    (*timer_rearm)(struct k_itimer *timr);
-       int     (*timer_forward)(struct k_itimer *timr, ktime_t now);
+       s64     (*timer_forward)(struct k_itimer *timr, ktime_t now);
        ktime_t (*timer_remaining)(struct k_itimer *timr, ktime_t now);
        int     (*timer_try_to_cancel)(struct k_itimer *timr);
        void    (*timer_arm)(struct k_itimer *timr, ktime_t expires,
index 58045eb976c38fc7c3540f8c0d12db9cf256aa52..a59641fb88b6963ac837a3b4cf4657c24e05098b 100644 (file)
@@ -90,7 +90,7 @@ static struct clock_event_device ce_broadcast_hrtimer = {
        .max_delta_ticks        = ULONG_MAX,
        .mult                   = 1,
        .shift                  = 0,
-       .cpumask                = cpu_all_mask,
+       .cpumask                = cpu_possible_mask,
 };
 
 static enum hrtimer_restart bc_handler(struct hrtimer *t)
index 2b41e8e2d31db26faaaf905543af749463939b9c..ccdb351277eecf739605be2e98b44174de595883 100644 (file)
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(sys_tz);
  */
 SYSCALL_DEFINE1(time, time_t __user *, tloc)
 {
-       time_t i = get_seconds();
+       time_t i = (time_t)ktime_get_real_seconds();
 
        if (tloc) {
                if (put_user(i,tloc))
@@ -107,11 +107,9 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
 /* compat_time_t is a 32 bit "long" and needs to get converted. */
 COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
 {
-       struct timeval tv;
        compat_time_t i;
 
-       do_gettimeofday(&tv);
-       i = tv.tv_sec;
+       i = (compat_time_t)ktime_get_real_seconds();
 
        if (tloc) {
                if (put_user(i,tloc))
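
Both time(2) paths now read the seconds via ktime_get_real_seconds(), which
is 64-bit internally; the result is still narrowed to the caller's time_t or
compat_time_t. A standalone sketch of where that Y2038 narrowing bites on a
32-bit time_t ABI (values made up):

/* sketch: 64-bit kernel clock vs. a 32-bit userspace time_t */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t real_seconds = 2147483648LL;	/* one past the 32-bit limit */
	int32_t legacy = (int32_t)real_seconds;	/* what a 32-bit time_t sees */

	printf("64-bit: %lld  32-bit view: %d\n",
	       (long long)real_seconds, legacy);
	return 0;
}
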
@@ -931,7 +929,7 @@ int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
 EXPORT_SYMBOL_GPL(compat_put_timespec64);
 
 int get_itimerspec64(struct itimerspec64 *it,
-                       const struct itimerspec __user *uit)
+                       const struct __kernel_itimerspec __user *uit)
 {
        int ret;
 
@@ -946,7 +944,7 @@ int get_itimerspec64(struct itimerspec64 *it,
 EXPORT_SYMBOL_GPL(get_itimerspec64);
 
 int put_itimerspec64(const struct itimerspec64 *it,
-                       struct itimerspec __user *uit)
+                       struct __kernel_itimerspec __user *uit)
 {
        int ret;
 
@@ -959,3 +957,24 @@ int put_itimerspec64(const struct itimerspec64 *it,
        return ret;
 }
 EXPORT_SYMBOL_GPL(put_itimerspec64);
+
+int get_compat_itimerspec64(struct itimerspec64 *its,
+                       const struct compat_itimerspec __user *uits)
+{
+
+       if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
+           __compat_get_timespec64(&its->it_value, &uits->it_value))
+               return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
+
+int put_compat_itimerspec64(const struct itimerspec64 *its,
+                       struct compat_itimerspec __user *uits)
+{
+       if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
+           __compat_put_timespec64(&its->it_value, &uits->it_value))
+               return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
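
get_compat_itimerspec64()/put_compat_itimerspec64() simply widen the 32-bit
compat timespec pairs into itimerspec64 and narrow them on the way back out.
A standalone sketch of the widening direction, using illustrative struct
names rather than the kernel's:

/* sketch: widen a 32-bit sec/nsec pair into 64-bit seconds */
#include <stdint.h>
#include <stdio.h>

struct ts32 { int32_t tv_sec; int32_t tv_nsec; };
struct ts64 { int64_t tv_sec; long    tv_nsec; };

static void widen(struct ts64 *dst, const struct ts32 *src)
{
	dst->tv_sec  = src->tv_sec;	/* sign-extended to 64 bits */
	dst->tv_nsec = src->tv_nsec;
}

int main(void)
{
	struct ts32 in = { 2147483647, 999999999 };	/* last 32-bit second */
	struct ts64 out;

	widen(&out, &in);
	printf("%lld.%09ld\n", (long long)out.tv_sec, out.tv_nsec);
	return 0;
}
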
index 4786df904c22d0a8e9473e600efee97244eb5782..d9e659a12c7684fd27bdd2e20eea07ba5034e12f 100644 (file)
 #define TK_MIRROR              (1 << 1)
 #define TK_CLOCK_WAS_SET       (1 << 2)
 
+enum timekeeping_adv_mode {
+       /* Update timekeeper when a tick has passed */
+       TK_ADV_TICK,
+
+       /* Update timekeeper on a direct frequency change */
+       TK_ADV_FREQ
+};
+
 /*
  * The most important data for readout fits into a single 64 byte
  * cache line.
@@ -97,7 +105,7 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
        }
 }
 
-static inline struct timespec64 tk_xtime(struct timekeeper *tk)
+static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
 {
        struct timespec64 ts;
 
@@ -154,7 +162,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
  * a read of the fast-timekeeper tkrs (which is protected by its own locking
  * and update logic).
  */
-static inline u64 tk_clock_read(struct tk_read_base *tkr)
+static inline u64 tk_clock_read(const struct tk_read_base *tkr)
 {
        struct clocksource *clock = READ_ONCE(tkr->clock);
 
@@ -203,7 +211,7 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
        }
 }
 
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        u64 now, last, mask, max, delta;
@@ -247,7 +255,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 {
 }
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 {
        u64 cycle_now, delta;
 
@@ -344,7 +352,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
 static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
+static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
 {
        u64 nsec;
 
@@ -355,7 +363,7 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
        return nsec + arch_gettimeoffset();
 }
 
-static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 {
        u64 delta;
 
@@ -363,7 +371,7 @@ static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
        return timekeeping_delta_to_ns(tkr, delta);
 }
 
-static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
        u64 delta;
 
@@ -386,7 +394,8 @@ static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
  * slightly wrong timestamp (a few nanoseconds). See
  * @ktime_get_mono_fast_ns.
  */
-static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
+static void update_fast_timekeeper(const struct tk_read_base *tkr,
+                                  struct tk_fast *tkf)
 {
        struct tk_read_base *base = tkf->base;
 
@@ -541,10 +550,10 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
  * number of cycles every time until timekeeping is resumed at which time the
  * proper readout base for the fast timekeeper will be restored automatically.
  */
-static void halt_fast_timekeeper(struct timekeeper *tk)
+static void halt_fast_timekeeper(const struct timekeeper *tk)
 {
        static struct tk_read_base tkr_dummy;
-       struct tk_read_base *tkr = &tk->tkr_mono;
+       const struct tk_read_base *tkr = &tk->tkr_mono;
 
        memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
        cycles_at_suspend = tk_clock_read(tkr);
@@ -1269,7 +1278,7 @@ EXPORT_SYMBOL(do_settimeofday64);
  *
  * Adds or subtracts an offset value from the current time.
  */
-static int timekeeping_inject_offset(struct timespec64 *ts)
+static int timekeeping_inject_offset(const struct timespec64 *ts)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
@@ -1510,8 +1519,20 @@ void __weak read_boot_clock64(struct timespec64 *ts)
        ts->tv_nsec = 0;
 }
 
-/* Flag for if timekeeping_resume() has injected sleeptime */
-static bool sleeptime_injected;
+/*
+ * Flag reflecting whether timekeeping_resume() has injected sleeptime.
+ *
+ * The flag starts off false and is set to true when a suspend reaches
+ * timekeeping_suspend(). timekeeping_resume() sets it back to false when
+ * the timekeeper clocksource did not stop across suspend and was used to
+ * update the sleep time. If the timekeeper clocksource has stopped, the
+ * flag stays true and the RTC resume code uses it to decide whether sleep
+ * time must be injected; the flag is cleared once that injection is done.
+ *
+ * If a suspend fails before reaching timekeeping_resume() then the flag
+ * stays false and prevents erroneous sleeptime injection.
+ */
+static bool suspend_timing_needed;
 
 /* Flag for if there is a persistent clock on this platform */
 static bool persistent_clock_exists;
@@ -1577,7 +1598,7 @@ static struct timespec64 timekeeping_suspend_time;
  * adds the sleep offset to the timekeeping variables.
  */
 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
-                                          struct timespec64 *delta)
+                                          const struct timespec64 *delta)
 {
        if (!timespec64_valid_strict(delta)) {
                printk_deferred(KERN_WARNING
@@ -1610,7 +1631,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
  */
 bool timekeeping_rtc_skipresume(void)
 {
-       return sleeptime_injected;
+       return !suspend_timing_needed;
 }
 
 /**
@@ -1638,7 +1659,7 @@ bool timekeeping_rtc_skipsuspend(void)
  * This function should only be called by rtc_resume(), and allows
  * a suspend offset to be injected into the timekeeping values.
  */
-void timekeeping_inject_sleeptime64(struct timespec64 *delta)
+void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned long flags;
@@ -1646,6 +1667,8 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta)
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
 
+       suspend_timing_needed = false;
+
        timekeeping_forward_now(tk);
 
        __timekeeping_inject_sleeptime(tk, delta);
@@ -1669,9 +1692,9 @@ void timekeeping_resume(void)
        struct clocksource *clock = tk->tkr_mono.clock;
        unsigned long flags;
        struct timespec64 ts_new, ts_delta;
-       u64 cycle_now;
+       u64 cycle_now, nsec;
+       bool inject_sleeptime = false;
 
-       sleeptime_injected = false;
        read_persistent_clock64(&ts_new);
 
        clockevents_resume();
@@ -1693,22 +1716,19 @@ void timekeeping_resume(void)
         * usable source. The rtc part is handled separately in rtc core code.
         */
        cycle_now = tk_clock_read(&tk->tkr_mono);
-       if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-               cycle_now > tk->tkr_mono.cycle_last) {
-               u64 nsec, cyc_delta;
-
-               cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
-                                             tk->tkr_mono.mask);
-               nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
+       nsec = clocksource_stop_suspend_timing(clock, cycle_now);
+       if (nsec > 0) {
                ts_delta = ns_to_timespec64(nsec);
-               sleeptime_injected = true;
+               inject_sleeptime = true;
        } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
                ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
-               sleeptime_injected = true;
+               inject_sleeptime = true;
        }
 
-       if (sleeptime_injected)
+       if (inject_sleeptime) {
+               suspend_timing_needed = false;
                __timekeeping_inject_sleeptime(tk, &ts_delta);
+       }
 
        /* Re-base the last cycle value */
        tk->tkr_mono.cycle_last = cycle_now;
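
timekeeping_resume() now delegates the nonstop-clocksource bookkeeping to
clocksource_stop_suspend_timing(), paired with clocksource_start_suspend_timing()
in timekeeping_suspend() below; the cycles-to-nanoseconds conversion itself is
unchanged. A standalone sketch of that conversion, with made-up mult/shift
values for an imaginary 19.2 MHz always-on timer:

/* sketch: cycles elapsed across suspend -> nanoseconds via mult/shift */
#include <stdint.h>
#include <stdio.h>

static uint64_t cyc_to_ns(uint64_t cyc_delta, uint32_t mult, uint32_t shift)
{
	/* the kernel uses mul_u64_u32_shr() here to avoid 64x32 overflow */
	return (cyc_delta * mult) >> shift;
}

int main(void)
{
	uint32_t mult = 53333, shift = 10;	/* ~52.08 ns per cycle */
	uint64_t slept = 19200000ULL * 30;	/* ~30 s worth of cycles */

	printf("slept ~%llu ns\n",
	       (unsigned long long)cyc_to_ns(slept, mult, shift));
	return 0;
}
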
@@ -1732,6 +1752,8 @@ int timekeeping_suspend(void)
        unsigned long flags;
        struct timespec64               delta, delta_delta;
        static struct timespec64        old_delta;
+       struct clocksource *curr_clock;
+       u64 cycle_now;
 
        read_persistent_clock64(&timekeeping_suspend_time);
 
@@ -1743,11 +1765,22 @@ int timekeeping_suspend(void)
        if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
                persistent_clock_exists = true;
 
+       suspend_timing_needed = true;
+
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
        timekeeping_forward_now(tk);
        timekeeping_suspended = 1;
 
+       /*
+        * Since we've called forward_now, cycle_last stores the value
+        * just read from the current clocksource. Save this to potentially
+        * use in suspend timing.
+        */
+       curr_clock = tk->tkr_mono.clock;
+       cycle_now = tk->tkr_mono.cycle_last;
+       clocksource_start_suspend_timing(curr_clock, cycle_now);
+
        if (persistent_clock_exists) {
                /*
                 * To avoid drift caused by repeated suspend/resumes,
@@ -2021,11 +2054,11 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
        return offset;
 }
 
-/**
- * update_wall_time - Uses the current clocksource to increment the wall time
- *
+/*
+ * timekeeping_advance - Updates the timekeeper to the current time and
+ * current NTP tick length
  */
-void update_wall_time(void)
+static void timekeeping_advance(enum timekeeping_adv_mode mode)
 {
        struct timekeeper *real_tk = &tk_core.timekeeper;
        struct timekeeper *tk = &shadow_timekeeper;
@@ -2042,14 +2075,17 @@ void update_wall_time(void)
 
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = real_tk->cycle_interval;
+
+       if (mode != TK_ADV_TICK)
+               goto out;
 #else
        offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
                                   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
-#endif
 
        /* Check if there's really nothing to do */
-       if (offset < real_tk->cycle_interval)
+       if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
                goto out;
+#endif
 
        /* Do some additional sanity checking */
        timekeeping_check_update(tk, offset);
@@ -2105,6 +2141,15 @@ out:
                clock_was_set_delayed();
 }
 
+/**
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ */
+void update_wall_time(void)
+{
+       timekeeping_advance(TK_ADV_TICK);
+}
+
 /**
  * getboottime64 - Return the real time of system boot.
  * @ts:                pointer to the timespec64 to be set
@@ -2220,7 +2265,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
 /**
  * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
  */
-static int timekeeping_validate_timex(struct timex *txc)
+static int timekeeping_validate_timex(const struct timex *txc)
 {
        if (txc->modes & ADJ_ADJTIME) {
                /* singleshot must not be used with any other mode bits */
@@ -2310,7 +2355,7 @@ int do_adjtimex(struct timex *txc)
                        return ret;
        }
 
-       getnstimeofday64(&ts);
+       ktime_get_real_ts64(&ts);
 
        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&tk_core.seq);
@@ -2327,6 +2372,10 @@ int do_adjtimex(struct timex *txc)
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+       /* Update the multiplier immediately if frequency was set directly */
+       if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
+               timekeeping_advance(TK_ADV_FREQ);
+
        if (tai != orig_tai)
                clock_was_set();
 
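With TK_ADV_FREQ, a direct frequency or tick change made through adjtimex(2)
is folded into the timekeeper immediately instead of at the next tick. A
userspace sketch of such a direct set follows; it needs CAP_SYS_TIME and the
+50 ppm value is purely illustrative:

/* set the clock frequency directly; scaled ppm uses a 16-bit fraction */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { 0 };

	tx.modes = ADJ_FREQUENCY;
	tx.freq = 50 << 16;			/* +50 ppm */
	if (adjtimex(&tx) < 0) {
		perror("adjtimex");
		return 1;
	}
	printf("clock state ok, freq now %ld\n", tx.freq);
	return 0;
}
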
index 0754cadfa9e61b044b6bfcf81f4146c8c45f63c3..238e4be6022955f5b11cefae73b236cb8ba2d39a 100644 (file)
@@ -70,7 +70,7 @@ static int __init tk_debug_sleep_time_init(void)
 }
 late_initcall(tk_debug_sleep_time_init);
 
-void tk_debug_account_sleep_time(struct timespec64 *t)
+void tk_debug_account_sleep_time(const struct timespec64 *t)
 {
        /* Cap bin index so we don't overflow the array */
        int bin = min(fls(t->tv_sec), NUM_BINS-1);
index cf5c0828ee3157bf620561f39838edd6c144e140..bcbb52db22565971d1a4885bd8e177c21265e6a5 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/time.h>
 
 #ifdef CONFIG_DEBUG_FS
-extern void tk_debug_account_sleep_time(struct timespec64 *t);
+extern void tk_debug_account_sleep_time(const struct timespec64 *t);
 #else
 #define tk_debug_account_sleep_time(x)
 #endif
index cc2d23e6ff6162ccb8101705d2bf5cb4d4554165..fa49cd753dea74ff442e49a637895ca71993703d 100644 (file)
@@ -581,7 +581,7 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
         * wheel:
         */
        base->next_expiry = timer->expires;
-               wake_up_nohz_cpu(base->cpu);
+       wake_up_nohz_cpu(base->cpu);
 }
 
 static void
@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)
 
        raw_spin_lock_irq(&base->lock);
 
+       /*
+        * timer_base::must_forward_clk must be cleared before running
+        * timers so that any timer functions that call mod_timer() will
+        * not try to forward the base. Idle tracking / clock forwarding
+        * logic is only used with BASE_STD timers.
+        *
+        * The must_forward_clk flag is cleared unconditionally also for
+        * the deferrable base. The deferrable base is not affected by idle
+        * tracking and never forwarded, so clearing the flag is a NOOP.
+        *
+        * The fact that the deferrable base is never forwarded can cause
+        * large variations in granularity for deferrable timers, but they
+        * can be deferred for long periods due to idle anyway.
+        */
+       base->must_forward_clk = false;
+
        while (time_after_eq(jiffies, base->clk)) {
 
                levels = collect_expired_timers(base, heads);
@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
-       /*
-        * must_forward_clk must be cleared before running timers so that any
-        * timer functions that call mod_timer will not try to forward the
-        * base. idle trcking / clock forwarding logic is only used with
-        * BASE_STD timers.
-        *
-        * The deferrable base does not do idle tracking at all, so we do
-        * not forward it. This can result in very large variations in
-        * granularity for deferrable timers, but they can be deferred for
-        * long periods due to idle.
-        */
-       base->must_forward_clk = false;
-
        __run_timers(base);
        if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
                __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
index ca6cd146aafe15420cb345ef3ba934dd21e13220..dcf73c5dab6e12e9074d15efae1ba9c65a139d60 100644 (file)
@@ -134,6 +134,11 @@ int main(int argv, char **argc)
        printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000)));
 
        if (llabs(eppm - ppm) > 1000) {
+               if (tx1.offset || tx2.offset ||
+                   tx1.freq != tx2.freq || tx1.tick != tx2.tick) {
+                       printf("        [SKIP]\n");
                        return ksft_exit_skip("The clock was adjusted externally. Shut down NTPd or other time sync daemons\n");
+               }
                printf("        [FAILED]\n");
                return ksft_exit_fail();
        }
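
The selftest now skips rather than fails when something else is steering the
clock. The new check amounts to comparing read-only adjtimex() snapshots taken
before and after the measurement; a simplified single-shot variant of that
probe is sketched below (illustrative only):

/* query-only adjtimex(): nonzero offset/freq hints at external steering */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { 0 };	/* modes = 0: read, don't adjust */

	if (adjtimex(&tx) < 0) {
		perror("adjtimex");
		return 1;
	}
	printf("offset=%ld freq=%ld tick=%ld\n", tx.offset, tx.freq, tx.tick);
	if (tx.offset || tx.freq)
		printf("clock appears to be steered; skew results unreliable\n");
	return 0;
}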