KVM: x86/mmu: Account pf_{fixed,emulate,spurious} in callers of "do page fault"

Move the accounting of the result of kvm_mmu_do_page_fault() to its
callers, as only pf_fixed is common to guest page faults and async #PFs,
and upcoming support for KVM_PRE_FAULT_MEMORY won't bump _any_ stats.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
sean-jc authored and bonzini committed Jul 12, 2024
1 parent 5186ec2 commit f5e7f00
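
To illustrate the pattern the commit message describes (moving stat accounting out of the shared fault handler and into each caller), here is a minimal, self-contained C sketch. It is not kernel code; everything except the RET_PF_* and pf_* names is a simplified stand-in, and the helper and caller names are made up for this example.

/*
 * Illustrative sketch only: the shared fault handler returns a status and
 * each caller bumps only the stats that make sense for it, mirroring the
 * structure this commit moves to.
 */
#include <stdio.h>

enum ret_pf { RET_PF_FIXED, RET_PF_EMULATE, RET_PF_SPURIOUS };

struct stats { unsigned long pf_fixed, pf_emulate, pf_spurious; };

/* Common helper: no stat accounting in here anymore. */
static enum ret_pf do_page_fault(void)
{
	return RET_PF_FIXED;
}

/* Guest #PF path: accounts all three outcomes. */
static void guest_page_fault(struct stats *st)
{
	enum ret_pf r = do_page_fault();

	if (r == RET_PF_FIXED)
		st->pf_fixed++;
	else if (r == RET_PF_EMULATE)
		st->pf_emulate++;
	else if (r == RET_PF_SPURIOUS)
		st->pf_spurious++;
}

/* Async #PF "page ready" path: only fixed faults are worth counting. */
static void async_page_ready(struct stats *st)
{
	if (do_page_fault() == RET_PF_FIXED)
		st->pf_fixed++;
}

int main(void)
{
	struct stats st = { 0 };

	guest_page_fault(&st);
	async_page_ready(&st);
	printf("fixed=%lu emulate=%lu spurious=%lu\n",
	       st.pf_fixed, st.pf_emulate, st.pf_spurious);
	return 0;
}

A pre-fault path in the same spirit (like the KVM_PRE_FAULT_MEMORY support mentioned above) would simply call the helper and bump nothing, which is the point of keeping the accounting in the callers.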
Showing 2 changed files with 18 additions and 14 deletions.
arch/x86/kvm/mmu/mmu.c: 18 additions & 1 deletion
@@ -4291,7 +4291,16 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	      work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
 		return;
 
-	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL);
+	r = kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code,
+				  true, NULL);
+
+	/*
+	 * Account fixed page faults, otherwise they'll never be counted, but
+	 * ignore stats for all other return times.  Page-ready "faults" aren't
+	 * truly spurious and never trigger emulation
+	 */
+	if (r == RET_PF_FIXED)
+		vcpu->stat.pf_fixed++;
 }
 
 static inline u8 kvm_max_level_for_order(int order)
@@ -5935,6 +5944,14 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 
 	if (r < 0)
 		return r;
+
+	if (r == RET_PF_FIXED)
+		vcpu->stat.pf_fixed++;
+	else if (r == RET_PF_EMULATE)
+		vcpu->stat.pf_emulate++;
+	else if (r == RET_PF_SPURIOUS)
+		vcpu->stat.pf_spurious++;
+
 	if (r != RET_PF_EMULATE)
 		return 1;
 
arch/x86/kvm/mmu/mmu_internal.h: 0 additions & 13 deletions
@@ -337,19 +337,6 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	if (fault.write_fault_to_shadow_pgtable && emulation_type)
 		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;
 
-	/*
-	 * Similar to above, prefetch faults aren't truly spurious, and the
-	 * async #PF path doesn't do emulation.  Do count faults that are fixed
-	 * by the async #PF handler though, otherwise they'll never be counted.
-	 */
-	if (r == RET_PF_FIXED)
-		vcpu->stat.pf_fixed++;
-	else if (prefetch)
-		;
-	else if (r == RET_PF_EMULATE)
-		vcpu->stat.pf_emulate++;
-	else if (r == RET_PF_SPURIOUS)
-		vcpu->stat.pf_spurious++;
 	return r;
 }
 
