Merge commit 'v2.6.28-rc8' into x86/doc
[sfrench/cifs-2.6.git] / arch / x86 / kernel / hpet.c
index acf62fc233da6c0ee8196d18c53a9a55d31bd10f..067d8de913f612d45018090a1fae4e5cc920462f 100644 (file)
@@ -1,29 +1,49 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/sysdev.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/hpet.h>
 #include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/cpu.h>
 #include <linux/pm.h>
+#include <linux/io.h>
 
 #include <asm/fixmap.h>
-#include <asm/hpet.h>
 #include <asm/i8253.h>
-#include <asm/io.h>
+#include <asm/hpet.h>
 
-#define HPET_MASK      CLOCKSOURCE_MASK(32)
-#define HPET_SHIFT     22
+#define HPET_MASK                      CLOCKSOURCE_MASK(32)
+#define HPET_SHIFT                     22
 
 /* FSEC = 10^-15
    NSEC = 10^-9 */
-#define FSEC_PER_NSEC  1000000L
+#define FSEC_PER_NSEC                  1000000L
+
+#define HPET_DEV_USED_BIT              2
+#define HPET_DEV_USED                  (1 << HPET_DEV_USED_BIT)
+#define HPET_DEV_VALID                 0x8
+#define HPET_DEV_FSB_CAP               0x1000
+#define HPET_DEV_PERI_CAP              0x2000
+
+#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
 
 /*
  * HPET address is set in acpi/boot.c, when an ACPI entry exists
  */
-unsigned long hpet_address;
-static void __iomem *hpet_virt_address;
+unsigned long                          hpet_address;
+unsigned long                          hpet_num_timers;
+static void __iomem                    *hpet_virt_address;
+
+struct hpet_dev {
+       struct clock_event_device       evt;
+       unsigned int                    num;
+       int                             cpu;
+       unsigned int                    irq;
+       unsigned int                    flags;
+       char                            name[10];
+};
 
 unsigned long hpet_readl(unsigned long a)
 {
@@ -59,7 +79,7 @@ static inline void hpet_clear_mapping(void)
 static int boot_hpet_disable;
 int hpet_force_user;
 
-static int __init hpet_setup(char* str)
+static int __init hpet_setup(char *str)
 {
        if (str) {
                if (!strncmp("disable", str, 7))
@@ -80,7 +100,7 @@ __setup("nohpet", disable_hpet);
 
 static inline int is_hpet_capable(void)
 {
-       return (!boot_hpet_disable && hpet_address);
+       return !boot_hpet_disable && hpet_address;
 }
 
 /*
@@ -102,6 +122,9 @@ EXPORT_SYMBOL_GPL(is_hpet_enabled);
  * timer 0 and timer 1 in case of RTC emulation.
  */
 #ifdef CONFIG_HPET
+
+static void hpet_reserve_msi_timers(struct hpet_data *hd);
+
 static void hpet_reserve_platform_timers(unsigned long id)
 {
        struct hpet __iomem *hpet = hpet_virt_address;
@@ -111,10 +134,10 @@ static void hpet_reserve_platform_timers(unsigned long id)
 
        nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
 
-       memset(&hd, 0, sizeof (hd));
-       hd.hd_phys_address = hpet_address;
-       hd.hd_address = hpet;
-       hd.hd_nirqs = nrtimers;
+       memset(&hd, 0, sizeof(hd));
+       hd.hd_phys_address      = hpet_address;
+       hd.hd_address           = hpet;
+       hd.hd_nirqs             = nrtimers;
        hpet_reserve_timer(&hd, 0);
 
 #ifdef CONFIG_HPET_EMULATE_RTC
@@ -130,10 +153,12 @@ static void hpet_reserve_platform_timers(unsigned long id)
        hd.hd_irq[1] = HPET_LEGACY_RTC;
 
        for (i = 2; i < nrtimers; timer++, i++) {
-               hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >>
-                       Tn_INT_ROUTE_CNF_SHIFT;
+               hd.hd_irq[i] = (readl(&timer->hpet_config) &
+                       Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
        }
 
+       hpet_reserve_msi_timers(&hd);
+
        hpet_alloc(&hd);
 
 }
@@ -227,71 +252,422 @@ static void hpet_legacy_clockevent_register(void)
        printk(KERN_DEBUG "hpet clockevent registered\n");
 }
 
-static void hpet_legacy_set_mode(enum clock_event_mode mode,
-                         struct clock_event_device *evt)
+static int hpet_setup_msi_irq(unsigned int irq);
+
+static void hpet_set_mode(enum clock_event_mode mode,
+                         struct clock_event_device *evt, int timer)
 {
        unsigned long cfg, cmp, now;
        uint64_t delta;
 
-       switch(mode) {
+       switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
-               delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * hpet_clockevent.mult;
-               delta >>= hpet_clockevent.shift;
+               delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
+               delta >>= evt->shift;
                now = hpet_readl(HPET_COUNTER);
                cmp = now + (unsigned long) delta;
-               cfg = hpet_readl(HPET_T0_CFG);
+               cfg = hpet_readl(HPET_Tn_CFG(timer));
                cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
                       HPET_TN_SETVAL | HPET_TN_32BIT;
-               hpet_writel(cfg, HPET_T0_CFG);
+               hpet_writel(cfg, HPET_Tn_CFG(timer));
                /*
                 * The first write after writing TN_SETVAL to the
                 * config register sets the counter value, the second
                 * write sets the period.
                 */
-               hpet_writel(cmp, HPET_T0_CMP);
+               hpet_writel(cmp, HPET_Tn_CMP(timer));
                udelay(1);
-               hpet_writel((unsigned long) delta, HPET_T0_CMP);
+               hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
                break;
 
        case CLOCK_EVT_MODE_ONESHOT:
-               cfg = hpet_readl(HPET_T0_CFG);
+               cfg = hpet_readl(HPET_Tn_CFG(timer));
                cfg &= ~HPET_TN_PERIODIC;
                cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
-               hpet_writel(cfg, HPET_T0_CFG);
+               hpet_writel(cfg, HPET_Tn_CFG(timer));
                break;
 
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
-               cfg = hpet_readl(HPET_T0_CFG);
+               cfg = hpet_readl(HPET_Tn_CFG(timer));
                cfg &= ~HPET_TN_ENABLE;
-               hpet_writel(cfg, HPET_T0_CFG);
+               hpet_writel(cfg, HPET_Tn_CFG(timer));
                break;
 
        case CLOCK_EVT_MODE_RESUME:
-               hpet_enable_legacy_int();
+               if (timer == 0) {
+                       hpet_enable_legacy_int();
+               } else {
+                       struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+                       hpet_setup_msi_irq(hdev->irq);
+                       disable_irq(hdev->irq);
+                       irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
+                       enable_irq(hdev->irq);
+               }
                break;
        }
 }
 
-static int hpet_legacy_next_event(unsigned long delta,
-                                 struct clock_event_device *evt)
+static int hpet_next_event(unsigned long delta,
+                          struct clock_event_device *evt, int timer)
 {
        u32 cnt;
 
        cnt = hpet_readl(HPET_COUNTER);
        cnt += (u32) delta;
-       hpet_writel(cnt, HPET_T0_CMP);
+       hpet_writel(cnt, HPET_Tn_CMP(timer));
 
        /*
         * We need to read back the CMP register to make sure that
         * what we wrote hit the chip before we compare it to the
         * counter.
         */
-       WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
+       WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
 
        return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
 }
 
+static void hpet_legacy_set_mode(enum clock_event_mode mode,
+                       struct clock_event_device *evt)
+{
+       hpet_set_mode(mode, evt, 0);
+}
+
+static int hpet_legacy_next_event(unsigned long delta,
+                       struct clock_event_device *evt)
+{
+       return hpet_next_event(delta, evt, 0);
+}
+
+/*
+ * HPET MSI Support
+ */
+#ifdef CONFIG_PCI_MSI
+
+static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
+static struct hpet_dev *hpet_devs;
+
+void hpet_msi_unmask(unsigned int irq)
+{
+       struct hpet_dev *hdev = get_irq_data(irq);
+       unsigned long cfg;
+
+       /* unmask it */
+       cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+       cfg |= HPET_TN_FSB;
+       hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+}
+
+void hpet_msi_mask(unsigned int irq)
+{
+       unsigned long cfg;
+       struct hpet_dev *hdev = get_irq_data(irq);
+
+       /* mask it */
+       cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+       cfg &= ~HPET_TN_FSB;
+       hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+}
+
+void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
+{
+       struct hpet_dev *hdev = get_irq_data(irq);
+
+       hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
+       hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
+}
+
+void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
+{
+       struct hpet_dev *hdev = get_irq_data(irq);
+
+       msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
+       msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
+       msg->address_hi = 0;
+}
+
+static void hpet_msi_set_mode(enum clock_event_mode mode,
+                               struct clock_event_device *evt)
+{
+       struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+       hpet_set_mode(mode, evt, hdev->num);
+}
+
+static int hpet_msi_next_event(unsigned long delta,
+                               struct clock_event_device *evt)
+{
+       struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+       return hpet_next_event(delta, evt, hdev->num);
+}
+
+static int hpet_setup_msi_irq(unsigned int irq)
+{
+       if (arch_setup_hpet_msi(irq)) {
+               destroy_irq(irq);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int hpet_assign_irq(struct hpet_dev *dev)
+{
+       unsigned int irq;
+
+       irq = create_irq();
+       if (!irq)
+               return -EINVAL;
+
+       set_irq_data(irq, dev);
+
+       if (hpet_setup_msi_irq(irq))
+               return -EINVAL;
+
+       dev->irq = irq;
+       return 0;
+}
+
+static irqreturn_t hpet_interrupt_handler(int irq, void *data)
+{
+       struct hpet_dev *dev = (struct hpet_dev *)data;
+       struct clock_event_device *hevt = &dev->evt;
+
+       if (!hevt->event_handler) {
+               printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
+                               dev->num);
+               return IRQ_HANDLED;
+       }
+
+       hevt->event_handler(hevt);
+       return IRQ_HANDLED;
+}
+
+static int hpet_setup_irq(struct hpet_dev *dev)
+{
+
+       if (request_irq(dev->irq, hpet_interrupt_handler,
+                       IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
+               return -1;
+
+       disable_irq(dev->irq);
+       irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
+       enable_irq(dev->irq);
+
+       printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
+                        dev->name, dev->irq);
+
+       return 0;
+}
+
+/* This should be called in specific @cpu */
+static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
+{
+       struct clock_event_device *evt = &hdev->evt;
+       uint64_t hpet_freq;
+
+       WARN_ON(cpu != smp_processor_id());
+       if (!(hdev->flags & HPET_DEV_VALID))
+               return;
+
+       if (hpet_setup_msi_irq(hdev->irq))
+               return;
+
+       hdev->cpu = cpu;
+       per_cpu(cpu_hpet_dev, cpu) = hdev;
+       evt->name = hdev->name;
+       hpet_setup_irq(hdev);
+       evt->irq = hdev->irq;
+
+       evt->rating = 110;
+       evt->features = CLOCK_EVT_FEAT_ONESHOT;
+       if (hdev->flags & HPET_DEV_PERI_CAP)
+               evt->features |= CLOCK_EVT_FEAT_PERIODIC;
+
+       evt->set_mode = hpet_msi_set_mode;
+       evt->set_next_event = hpet_msi_next_event;
+       evt->shift = 32;
+
+       /*
+        * The period is a femto seconds value. We need to calculate the
+        * scaled math multiplication factor for nanosecond to hpet tick
+        * conversion.
+        */
+       hpet_freq = 1000000000000000ULL;
+       do_div(hpet_freq, hpet_period);
+       evt->mult = div_sc((unsigned long) hpet_freq,
+                                     NSEC_PER_SEC, evt->shift);
+       /* Calculate the max delta */
+       evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
+       /* 5 usec minimum reprogramming delta. */
+       evt->min_delta_ns = 5000;
+
+       evt->cpumask = cpumask_of_cpu(hdev->cpu);
+       clockevents_register_device(evt);
+}
+
+#ifdef CONFIG_HPET
+/* Reserve at least one timer for userspace (/dev/hpet) */
+#define RESERVE_TIMERS 1
+#else
+#define RESERVE_TIMERS 0
+#endif
+
+static void hpet_msi_capability_lookup(unsigned int start_timer)
+{
+       unsigned int id;
+       unsigned int num_timers;
+       unsigned int num_timers_used = 0;
+       int i;
+
+       id = hpet_readl(HPET_ID);
+
+       num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
+       num_timers++; /* Value read out starts from 0 */
+
+       hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
+       if (!hpet_devs)
+               return;
+
+       hpet_num_timers = num_timers;
+
+       for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
+               struct hpet_dev *hdev = &hpet_devs[num_timers_used];
+               unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));
+
+               /* Only consider HPET timer with MSI support */
+               if (!(cfg & HPET_TN_FSB_CAP))
+                       continue;
+
+               hdev->flags = 0;
+               if (cfg & HPET_TN_PERIODIC_CAP)
+                       hdev->flags |= HPET_DEV_PERI_CAP;
+               hdev->num = i;
+
+               sprintf(hdev->name, "hpet%d", i);
+               if (hpet_assign_irq(hdev))
+                       continue;
+
+               hdev->flags |= HPET_DEV_FSB_CAP;
+               hdev->flags |= HPET_DEV_VALID;
+               num_timers_used++;
+               if (num_timers_used == num_possible_cpus())
+                       break;
+       }
+
+       printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
+               num_timers, num_timers_used);
+}
+
+#ifdef CONFIG_HPET
+static void hpet_reserve_msi_timers(struct hpet_data *hd)
+{
+       int i;
+
+       if (!hpet_devs)
+               return;
+
+       for (i = 0; i < hpet_num_timers; i++) {
+               struct hpet_dev *hdev = &hpet_devs[i];
+
+               if (!(hdev->flags & HPET_DEV_VALID))
+                       continue;
+
+               hd->hd_irq[hdev->num] = hdev->irq;
+               hpet_reserve_timer(hd, hdev->num);
+       }
+}
+#endif
+
+static struct hpet_dev *hpet_get_unused_timer(void)
+{
+       int i;
+
+       if (!hpet_devs)
+               return NULL;
+
+       for (i = 0; i < hpet_num_timers; i++) {
+               struct hpet_dev *hdev = &hpet_devs[i];
+
+               if (!(hdev->flags & HPET_DEV_VALID))
+                       continue;
+               if (test_and_set_bit(HPET_DEV_USED_BIT,
+                       (unsigned long *)&hdev->flags))
+                       continue;
+               return hdev;
+       }
+       return NULL;
+}
+
+struct hpet_work_struct {
+       struct delayed_work work;
+       struct completion complete;
+};
+
+static void hpet_work(struct work_struct *w)
+{
+       struct hpet_dev *hdev;
+       int cpu = smp_processor_id();
+       struct hpet_work_struct *hpet_work;
+
+       hpet_work = container_of(w, struct hpet_work_struct, work.work);
+
+       hdev = hpet_get_unused_timer();
+       if (hdev)
+               init_one_hpet_msi_clockevent(hdev, cpu);
+
+       complete(&hpet_work->complete);
+}
+
+static int hpet_cpuhp_notify(struct notifier_block *n,
+               unsigned long action, void *hcpu)
+{
+       unsigned long cpu = (unsigned long)hcpu;
+       struct hpet_work_struct work;
+       struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
+
+       switch (action & 0xf) {
+       case CPU_ONLINE:
+               INIT_DELAYED_WORK(&work.work, hpet_work);
+               init_completion(&work.complete);
+               /* FIXME: add schedule_work_on() */
+               schedule_delayed_work_on(cpu, &work.work, 0);
+               wait_for_completion(&work.complete);
+               break;
+       case CPU_DEAD:
+               if (hdev) {
+                       free_irq(hdev->irq, hdev);
+                       hdev->flags &= ~HPET_DEV_USED;
+                       per_cpu(cpu_hpet_dev, cpu) = NULL;
+               }
+               break;
+       }
+       return NOTIFY_OK;
+}
+#else
+
+static int hpet_setup_msi_irq(unsigned int irq)
+{
+       return 0;
+}
+static void hpet_msi_capability_lookup(unsigned int start_timer)
+{
+       return;
+}
+
+#ifdef CONFIG_HPET
+static void hpet_reserve_msi_timers(struct hpet_data *hd)
+{
+       return;
+}
+#endif
+
+static int hpet_cpuhp_notify(struct notifier_block *n,
+               unsigned long action, void *hcpu)
+{
+       return NOTIFY_OK;
+}
+
+#endif
+
 /*
  * Clock source related code
  */
@@ -427,8 +803,10 @@ int __init hpet_enable(void)
 
        if (id & HPET_ID_LEGSUP) {
                hpet_legacy_clockevent_register();
+               hpet_msi_capability_lookup(2);
                return 1;
        }
+       hpet_msi_capability_lookup(0);
        return 0;
 
 out_nohpet:
@@ -445,6 +823,8 @@ out_nohpet:
  */
 static __init int hpet_late_init(void)
 {
+       int cpu;
+
        if (boot_hpet_disable)
                return -ENODEV;
 
@@ -460,6 +840,13 @@ static __init int hpet_late_init(void)
 
        hpet_reserve_platform_timers(hpet_readl(HPET_ID));
 
+       for_each_online_cpu(cpu) {
+               hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
+       }
+
+       /* This notifier should be called after workqueue is ready */
+       hotcpu_notifier(hpet_cpuhp_notify, -20);
+
        return 0;
 }
 fs_initcall(hpet_late_init);