/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */
15 #include "lock_events.h"
17 #ifdef CONFIG_LOCK_EVENT_COUNTS
18 #ifdef CONFIG_PARAVIRT_SPINLOCKS
20 * Collect pvqspinlock locking event counts
22 #include <linux/sched.h>
23 #include <linux/sched/clock.h>
26 #define EVENT_COUNT(ev) lockevents[LOCKEVENT_ ## ev]
29 * PV specific per-cpu counter
31 static DEFINE_PER_CPU(u64, pv_kick_time);
34 * Function to read and return the PV qspinlock counts.
36 * The following counters are handled specially:
38 * Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
40 * Average wake latency (ns) = pv_latency_wake/pv_kick_wake
42 * Average hops/hash = pv_hash_hops/pv_kick_unlock
44 ssize_t lockevent_read(struct file *file, char __user *user_buf,
45 size_t count, loff_t *ppos)
49 u64 sum = 0, kicks = 0;
52 * Get the counter ID stored in file->f_inode->i_private
54 id = (long)file_inode(file)->i_private;
56 if (id >= lockevent_num)
59 for_each_possible_cpu(cpu) {
60 sum += per_cpu(lockevents[id], cpu);
62 * Need to sum additional counters for some of them
66 case LOCKEVENT_pv_latency_kick:
67 case LOCKEVENT_pv_hash_hops:
68 kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
71 case LOCKEVENT_pv_latency_wake:
72 kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
77 if (id == LOCKEVENT_pv_hash_hops) {
81 frac = 100ULL * do_div(sum, kicks);
82 frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
86 * Return a X.XX decimal number
88 len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
92 * Round to the nearest ns
94 if ((id == LOCKEVENT_pv_latency_kick) ||
95 (id == LOCKEVENT_pv_latency_wake)) {
97 sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
99 len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
102 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
108 static inline void lockevent_pv_hop(int hopcnt)
110 this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
114 * Replacement function for pv_kick()
116 static inline void __pv_kick(int cpu)
118 u64 start = sched_clock();
120 per_cpu(pv_kick_time, cpu) = start;
122 this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
126 * Replacement function for pv_wait()
128 static inline void __pv_wait(u8 *ptr, u8 val)
130 u64 *pkick_time = this_cpu_ptr(&pv_kick_time);
135 this_cpu_add(EVENT_COUNT(pv_latency_wake),
136 sched_clock() - *pkick_time);
137 lockevent_inc(pv_kick_wake);
141 #define pv_kick(c) __pv_kick(c)
142 #define pv_wait(p, v) __pv_wait(p, v)
144 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
146 #else /* CONFIG_LOCK_EVENT_COUNTS */
148 static inline void lockevent_pv_hop(int hopcnt) { }
150 #endif /* CONFIG_LOCK_EVENT_COUNTS */