kvm/x86/mmu: Pass gfn and level to rmapp callback.
author Andres Lagar-Cavilla <andreslc@google.com>
Tue, 23 Sep 2014 19:34:54 +0000 (12:34 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 24 Sep 2014 12:07:57 +0000 (14:07 +0200)
Callbacks no longer have to do extra computation to recover what the caller
(kvm_handle_hva_range()) already knows: the gfn and the page-table level of
the rmap bucket being processed. Useful for debugging/tracing/printk and
future changes.
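
As a rough illustration, here is a standalone user-space sketch (not kernel
code; the huge-page shift is re-derived here under the usual x86 4K/2M/1G
assumption, and "handler" is only a stand-in for the rmapp callbacks) of the
per-level gfn stride the caller can carry along for free, which each callback
would otherwise have to recompute from the rmap bucket:

  #include <stdio.h>

  typedef unsigned long long gfn_t;

  #define PT_PAGE_TABLE_LEVEL 1
  /* gfn bits covered below a given level: 0, 9, 18 for 4K/2M/1G pages */
  #define HPAGE_GFN_SHIFT(level) (((level) - PT_PAGE_TABLE_LEVEL) * 9)

  /* stand-in for the rmapp callbacks, which are now told gfn and level */
  static void handler(gfn_t gfn, int level)
  {
          printf("rmap bucket: gfn %llx level %d\n", gfn, level);
  }

  int main(void)
  {
          gfn_t gfn_start = 0x1000;       /* arbitrary example range */
          int idx, level;

          for (level = PT_PAGE_TABLE_LEVEL; level <= 3; ++level) {
                  gfn_t gfn = gfn_start;

                  /* consecutive buckets at this level are one huge page apart */
                  for (idx = 0; idx < 4; ++idx,
                       gfn += 1ULL << HPAGE_GFN_SHIFT(level))
                          handler(gfn, level);
          }
          return 0;
  }

The real iterator in kvm_handle_hva_range() does the same thing with
KVM_HPAGE_GFN_SHIFT(j), as shown in the hunk below.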

Signed-off-by: Andres Lagar-Cavilla <andreslc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c
include/trace/events/kvm.h

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bdd1acbab84d3ff81e5b0391e20b00d68fd44a9a..47d534066325a1cb1ec78a735fcf1a611f79c465 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1262,7 +1262,8 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
-                          struct kvm_memory_slot *slot, unsigned long data)
+                          struct kvm_memory_slot *slot, gfn_t gfn, int level,
+                          unsigned long data)
 {
        u64 *sptep;
        struct rmap_iterator iter;
@@ -1270,7 +1271,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
        while ((sptep = rmap_get_first(*rmapp, &iter))) {
                BUG_ON(!(*sptep & PT_PRESENT_MASK));
-               rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
+               rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n",
+                            sptep, *sptep, gfn, level);
 
                drop_spte(kvm, sptep);
                need_tlb_flush = 1;
@@ -1280,7 +1282,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 }
 
 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
-                            struct kvm_memory_slot *slot, unsigned long data)
+                            struct kvm_memory_slot *slot, gfn_t gfn, int level,
+                            unsigned long data)
 {
        u64 *sptep;
        struct rmap_iterator iter;
@@ -1294,7 +1297,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
        for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                BUG_ON(!is_shadow_present_pte(*sptep));
-               rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
+               rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
+                            sptep, *sptep, gfn, level);
 
                need_flush = 1;
 
@@ -1328,6 +1332,8 @@ static int kvm_handle_hva_range(struct kvm *kvm,
                                int (*handler)(struct kvm *kvm,
                                               unsigned long *rmapp,
                                               struct kvm_memory_slot *slot,
+                                              gfn_t gfn,
+                                              int level,
                                               unsigned long data))
 {
        int j;
@@ -1357,6 +1363,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
                     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
                        unsigned long idx, idx_end;
                        unsigned long *rmapp;
+                       gfn_t gfn = gfn_start;
 
                        /*
                         * {idx(page_j) | page_j intersects with
@@ -1367,8 +1374,10 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 
                        rmapp = __gfn_to_rmap(gfn_start, j, memslot);
 
-                       for (; idx <= idx_end; ++idx)
-                               ret |= handler(kvm, rmapp++, memslot, data);
+                       for (; idx <= idx_end;
+                              ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j)))
+                               ret |= handler(kvm, rmapp++, memslot,
+                                              gfn, j, data);
                }
        }
 
@@ -1379,6 +1388,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          unsigned long data,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         struct kvm_memory_slot *slot,
+                                        gfn_t gfn, int level,
                                         unsigned long data))
 {
        return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
@@ -1400,7 +1410,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 }
 
 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
-                        struct kvm_memory_slot *slot, unsigned long data)
+                        struct kvm_memory_slot *slot, gfn_t gfn, int level,
+                        unsigned long data)
 {
        u64 *sptep;
        struct rmap_iterator uninitialized_var(iter);
@@ -1415,7 +1426,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
         * out actively used pages or breaking up actively used hugepages.
         */
        if (!shadow_accessed_mask) {
-               young = kvm_unmap_rmapp(kvm, rmapp, slot, data);
+               young = kvm_unmap_rmapp(kvm, rmapp, slot, gfn, level, data);
                goto out;
        }
 
@@ -1430,13 +1441,13 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                }
        }
 out:
-       /* @data has hva passed to kvm_age_hva(). */
-       trace_kvm_age_page(data, slot, young);
+       trace_kvm_age_page(gfn, level, slot, young);
        return young;
 }
 
 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
-                             struct kvm_memory_slot *slot, unsigned long data)
+                             struct kvm_memory_slot *slot, gfn_t gfn,
+                             int level, unsigned long data)
 {
        u64 *sptep;
        struct rmap_iterator iter;
@@ -1474,13 +1485,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
-       kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, 0);
+       kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
        kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-       return kvm_handle_hva(kvm, hva, hva, kvm_age_rmapp);
+       return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index ab679c395042247ac13b0837051325aab96c55af..6edf1f2028cdb0e0801af85183e02d0416fa73ed 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -225,24 +225,26 @@ TRACE_EVENT(kvm_fpu,
 );
 
 TRACE_EVENT(kvm_age_page,
-       TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
-       TP_ARGS(hva, slot, ref),
+       TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
+       TP_ARGS(gfn, level, slot, ref),
 
        TP_STRUCT__entry(
                __field(        u64,    hva             )
                __field(        u64,    gfn             )
+               __field(        u8,     level           )
                __field(        u8,     referenced      )
        ),
 
        TP_fast_assign(
-               __entry->hva            = hva;
-               __entry->gfn            =
-                 slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
+               __entry->gfn            = gfn;
+               __entry->level          = level;
+               __entry->hva            = ((gfn - slot->base_gfn) <<
+                                           PAGE_SHIFT) + slot->userspace_addr;
                __entry->referenced     = ref;
        ),
 
-       TP_printk("hva %llx gfn %llx %s",
-                 __entry->hva, __entry->gfn,
+       TP_printk("hva %llx gfn %llx level %u %s",
+                 __entry->hva, __entry->gfn, __entry->level,
                  __entry->referenced ? "YOUNG" : "OLD")
 );