arch/x86/kvm/mmutrace.h
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/ftrace_event.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

#define KVM_MMU_PAGE_FIELDS \
        __field(__u64, gfn) \
        __field(__u32, role) \
        __field(__u32, root_count) \
        __field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)                      \
        __entry->gfn = sp->gfn;                      \
        __entry->role = sp->role.word;               \
        __entry->root_count = sp->root_count;        \
        __entry->unsync = sp->unsync;

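/*
 * KVM_MMU_PAGE_PRINTK() writes a decoded description of the shadow page into
 * the trace seq buffer and evaluates to a pointer to the text it just added,
 * so the events below can embed it through a plain "%s" in TP_printk().  The
 * trailing "%c" printed with 0 NUL-terminates that text so the outer "%s"
 * stops at the right place.
 */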
#define KVM_MMU_PAGE_PRINTK() ({                                        \
        const char *ret = p->buffer + p->len;                           \
        static const char *access_str[] = {                             \
                "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
        };                                                              \
        union kvm_mmu_page_role role;                                   \
                                                                        \
        role.word = __entry->role;                                      \
                                                                        \
        trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s"               \
                         " %snxe root %u %s%c",                         \
                         __entry->gfn, role.level,                      \
                         role.cr4_pae ? " pae" : "",                    \
                         role.quadrant,                                 \
                         role.direct ? " direct" : "",                  \
                         access_str[role.access],                       \
                         role.invalid ? " invalid" : "",                \
                         role.nxe ? "" : "!",                           \
                         __entry->root_count,                           \
                         __entry->unsync ? "unsync" : "sync", 0);       \
        ret;                                                            \
                })

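/*
 * Table for __print_flags(): maps the x86 page-fault error code bits
 * (PFERR_*_MASK) to short tags in the trace output.
 */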
#define kvm_mmu_trace_pferr_flags       \
        { PFERR_PRESENT_MASK, "P" },    \
        { PFERR_WRITE_MASK, "W" },      \
        { PFERR_USER_MASK, "U" },       \
        { PFERR_RSVD_MASK, "RSVD" },    \
        { PFERR_FETCH_MASK, "F" }

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
        kvm_mmu_pagetable_walk,
        TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
        TP_ARGS(addr, write_fault, user_fault, fetch_fault),

        TP_STRUCT__entry(
                __field(__u64, addr)
                __field(__u32, pferr)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->pferr = (!!write_fault << 1) | (!!user_fault << 2)
                                 | (!!fetch_fault << 4);
        ),

        TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
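/*
 * Illustrative call site (a sketch, not part of this header): TRACE_EVENT()
 * generates a trace_kvm_mmu_pagetable_walk() stub, which the guest
 * page-table walker would invoke roughly as
 *
 *      trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault, fetch_fault);
 *
 * The shifts in TP_fast_assign() place each fault flag at the bit position of
 * the corresponding PFERR_*_MASK, so __print_flags() decodes them directly.
 */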

/* We just walked a paging element */
TRACE_EVENT(
        kvm_mmu_paging_element,
        TP_PROTO(u64 pte, int level),
        TP_ARGS(pte, level),

        TP_STRUCT__entry(
                __field(__u64, pte)
                __field(__u32, level)
                ),

        TP_fast_assign(
                __entry->pte = pte;
                __entry->level = level;
                ),

        TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size),

        TP_STRUCT__entry(
                __field(__u64, gpa)
        ),

        TP_fast_assign(
                __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
                                + index * size;
                ),

        TP_printk("gpa %llx", __entry->gpa)
);
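/*
 * The two events below share kvm_mmu_set_bit_class, so they reuse its entry
 * layout, assignment and print format and differ only in name.  A caller
 * would use the generated stubs, e.g. (illustrative sketch):
 *
 *      trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pt_element_t));
 */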

/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

TRACE_EVENT(
        kvm_mmu_walker_error,
        TP_PROTO(u32 pferr),
        TP_ARGS(pferr),

        TP_STRUCT__entry(
                __field(__u32, pferr)
                ),

        TP_fast_assign(
                __entry->pferr = pferr;
                ),

        TP_printk("pferr %x %s", __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

TRACE_EVENT(
        kvm_mmu_get_page,
        TP_PROTO(struct kvm_mmu_page *sp, bool created),
        TP_ARGS(sp, created),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
                __field(bool, created)
                ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
                __entry->created = created;
                ),

        TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
                  __entry->created ? "new" : "existing")
);
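/*
 * Sample trace line for kvm_mmu_get_page (field values are illustrative
 * only), as rendered by the TP_printk() above:
 *
 *      kvm_mmu_get_page: sp gfn c0000 2 pae q0 wux !nxe root 1 sync new
 */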

DECLARE_EVENT_CLASS(kvm_mmu_page_class,

        TP_PROTO(struct kvm_mmu_page *sp),
        TP_ARGS(sp),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
        ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
        ),

        TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_zap_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);
#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>
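/*
 * Usage sketch (an assumption about the surrounding build, mirroring the
 * standard ftrace pattern): exactly one .c file in this directory defines
 * CREATE_TRACE_POINTS before including this header so that define_trace.h
 * emits the tracepoint bodies, e.g.
 *
 *      #define CREATE_TRACE_POINTS
 *      #include "mmutrace.h"
 *
 * TRACE_INCLUDE_PATH is "." so define_trace.h re-reads this header relative
 * to that file instead of include/trace/events/.
 */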