Commit f691fe1d authored by Avi Kivity

KVM: Trace shadow page lifecycle

Create, sync, unsync, zap.
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 9c1b96e3
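
With this commit applied, the four new events show up as the kvmmmu trace group in tracefs. As a usage sketch (the paths below are the standard ftrace/tracefs layout, not anything added by this patch, and assume debugfs is mounted at /sys/kernel/debug):

    # enable two of the new shadow page lifecycle events
    echo 1 > /sys/kernel/debug/tracing/events/kvmmmu/kvm_mmu_get_page/enable
    echo 1 > /sys/kernel/debug/tracing/events/kvmmmu/kvm_mmu_zap_page/enable
    # stream events while a guest runs
    cat /sys/kernel/debug/tracing/trace_pipe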
@@ -1122,6 +1122,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		return 1;
 	}
 
+	trace_kvm_mmu_sync_page(sp);
 	if (rmap_write_protect(vcpu->kvm, sp->gfn))
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	kvm_unlink_unsync_page(vcpu->kvm, sp);
@@ -1244,8 +1245,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	pgprintk("%s: looking gfn %lx role %x\n", __func__,
-		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
@@ -1262,14 +1261,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
 				kvm_mmu_mark_parents_unsync(vcpu, sp);
 			}
-			pgprintk("%s: found\n", __func__);
+			trace_kvm_mmu_get_page(sp, false);
 			return sp;
 		}
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
 		return sp;
-	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, bucket);
@@ -1282,6 +1280,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		vcpu->arch.mmu.prefetch_page(vcpu, sp);
 	else
 		nonpaging_prefetch_page(vcpu, sp);
+	trace_kvm_mmu_get_page(sp, true);
 	return sp;
 }
 
@@ -1410,6 +1409,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	int ret;
+
+	trace_kvm_mmu_zap_page(sp);
 	++kvm->stat.mmu_shadow_zapped;
 	ret = mmu_zap_unsync_children(kvm, sp);
 	kvm_mmu_page_unlink_children(kvm, sp);
@@ -1656,6 +1657,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	struct kvm_mmu_page *s;
 	struct hlist_node *node, *n;
 
+	trace_kvm_mmu_unsync_page(sp);
 	index = kvm_page_table_hashfn(sp->gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	/* don't unsync if pagetable is shadowed with multiple roles */
...
@@ -2,12 +2,48 @@
 #define _TRACE_KVMMMU_H
 
 #include <linux/tracepoint.h>
+#include <linux/ftrace_event.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvmmmu
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE mmutrace
 
+#define KVM_MMU_PAGE_FIELDS \
+	__field(__u64, gfn) \
+	__field(__u32, role) \
+	__field(__u32, root_count) \
+	__field(__u32, unsync)
+
+#define KVM_MMU_PAGE_ASSIGN(sp) \
+	__entry->gfn = sp->gfn; \
+	__entry->role = sp->role.word; \
+	__entry->root_count = sp->root_count; \
+	__entry->unsync = sp->unsync;
+
+#define KVM_MMU_PAGE_PRINTK() ({ \
+	const char *ret = p->buffer + p->len; \
+	static const char *access_str[] = { \
+		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
+	}; \
+	union kvm_mmu_page_role role; \
+	\
+	role.word = __entry->role; \
+	\
+	trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge" \
+			 " %snxe root %u %s%c", \
+			 __entry->gfn, role.level, role.glevels, \
+			 role.quadrant, \
+			 role.direct ? " direct" : "", \
+			 access_str[role.access], \
+			 role.invalid ? " invalid" : "", \
+			 role.cr4_pge ? "" : "!", \
+			 role.nxe ? "" : "!", \
+			 __entry->root_count, \
+			 __entry->unsync ? "unsync" : "sync", 0); \
+	ret; \
+})
+
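All four lifecycle events render through this helper, so they share one line format. A hypothetical decoded entry could look like this (field values invented for illustration):

    kvm_mmu_get_page: sp gfn 7010 2/4 q0 wux !pge !nxe root 0 sync new

read as: shadow page for guest frame 0x7010, shadow level 2 of a 4-level guest, quadrant 0, write/user/execute access, role has CR4.PGE and NXE clear, no root references, currently in sync, and (the trailing word, emitted by kvm_mmu_get_page only) newly created rather than found in the hash.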
 #define kvm_mmu_trace_pferr_flags \
 	{ PFERR_PRESENT_MASK, "P" }, \
 	{ PFERR_WRITE_MASK, "W" }, \
@@ -111,6 +147,73 @@ TRACE_EVENT(
 	__print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
 );
 
+TRACE_EVENT(
+	kvm_mmu_get_page,
+	TP_PROTO(struct kvm_mmu_page *sp, bool created),
+	TP_ARGS(sp, created),
+
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		__field(bool, created)
+		),
+
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		__entry->created = created;
+		),
+
+	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
+		  __entry->created ? "new" : "existing")
+);
+
+TRACE_EVENT(
+	kvm_mmu_sync_page,
+	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_ARGS(sp),
+
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		),
+
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		),
+
+	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
+TRACE_EVENT(
+	kvm_mmu_unsync_page,
+	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_ARGS(sp),
+
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		),
+
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		),
+
+	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
+TRACE_EVENT(
+	kvm_mmu_zap_page,
+	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_ARGS(sp),
+
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		),
+
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		),
+
+	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+);
+
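Because the three single-argument events share KVM_MMU_PAGE_FIELDS and differ only in name, a trace consumer can parse their records uniformly; kvm_mmu_get_page adds just the created flag. The exact record layout can be checked from the per-event format file exposed by tracefs, e.g. (standard path, not part of this diff):

    cat /sys/kernel/debug/tracing/events/kvmmmu/kvm_mmu_zap_page/format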
 #endif /* _TRACE_KVMMMU_H */
 
 /* This part must be outside protection */
...