/*
 *  arch/powerpc/kernel/pmc.c
 *
 *  Copyright (C) 2004 David Gibson, IBM Corporation.
 *  Includes code formerly from arch/ppc/kernel/perfmon.c:
 *    Author: Andy Fleming
 *    Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/pmc.h>

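/*
 * Added note: MMCR0_PMAO is only defined by the register headers on some
 * platforms; fall back to 0 elsewhere so the mask used in dummy_perf()
 * below compiles everywhere and is a no-op where the bit does not exist.
 */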
#ifndef MMCR0_PMAO
#define MMCR0_PMAO      0
#endif

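/*
 * Default performance monitor interrupt handler, installed when nobody
 * has reserved the PMC hardware: simply mask further performance monitor
 * exceptions so a stray counter overflow cannot keep interrupting the CPU.
 */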
static void dummy_perf(struct pt_regs *regs)
{
#if defined(CONFIG_FSL_EMB_PERFMON)
        mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
        if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
                mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
#else
        mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE);
#endif
}

static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
static void *pmc_owner_caller; /* mostly for debugging */
perf_irq_t perf_irq = dummy_perf;

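/*
 * Claim exclusive use of the PMC hardware and install new_perf_irq as the
 * performance monitor interrupt handler (dummy_perf if NULL).  Returns
 * -EBUSY if another caller already holds the reservation.
 */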
int reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
        int err = 0;

        raw_spin_lock(&pmc_owner_lock);

        if (pmc_owner_caller) {
                printk(KERN_WARNING "reserve_pmc_hardware: "
                       "PMC hardware busy (reserved by caller %p)\n",
                       pmc_owner_caller);
                err = -EBUSY;
                goto out;
        }

        pmc_owner_caller = __builtin_return_address(0);
        perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;

 out:
        raw_spin_unlock(&pmc_owner_lock);
        return err;
}
EXPORT_SYMBOL_GPL(reserve_pmc_hardware);

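/*
 * Give up the reservation taken by reserve_pmc_hardware() and restore the
 * dummy interrupt handler; warns if nothing was actually reserved.
 */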
void release_pmc_hardware(void)
{
        raw_spin_lock(&pmc_owner_lock);

        WARN_ON(!pmc_owner_caller);

        pmc_owner_caller = NULL;
        perf_irq = dummy_perf;

        raw_spin_unlock(&pmc_owner_lock);
}
EXPORT_SYMBOL_GPL(release_pmc_hardware);
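
/*
 * Illustrative usage sketch only (not code from this file; the my_pmu_*
 * names are made up): a PMU driver takes the reservation and registers
 * its overflow handler before touching the counters, then drops the
 * reservation on teardown so another subsystem can claim the hardware.
 *
 *	static void my_pmu_interrupt(struct pt_regs *regs)
 *	{
 *		pr_debug("my_pmu: counter overflow at %lx\n", regs->nip);
 *	}
 *
 *	static int __init my_pmu_init(void)
 *	{
 *		int err = reserve_pmc_hardware(my_pmu_interrupt);
 *
 *		if (err)
 *			return err;
 *		pr_info("my_pmu: PMC hardware reserved\n");
 *		return 0;
 *	}
 *
 *	static void __exit my_pmu_exit(void)
 *	{
 *		release_pmc_hardware();
 *	}
 *
 * reserve_pmc_hardware() must succeed before the counters are programmed;
 * release_pmc_hardware() reinstates dummy_perf so the next owner starts
 * from a clean state.
 */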

#ifdef CONFIG_PPC64
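/*
 * Added note: POWER4 keeps its performance monitor counters disabled
 * until a HID0 bit is set.  The shift below uses IBM (MSB = bit 0)
 * numbering, so 1UL << (63 - 20) is HID0 bit 20.
 */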
void power4_enable_pmcs(void)
{
        unsigned long hid0;

        hid0 = mfspr(SPRN_HID0);
        hid0 |= 1UL << (63 - 20);

        /* POWER4 requires the following sequence */
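        /* i.e. sync, the HID0 write, six reads back of HID0, then isync */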
        asm volatile(
                "sync\n"
                "mtspr     %1, %0\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "mfspr     %0, %1\n"
                "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
                "memory");
}
#endif /* CONFIG_PPC64 */