-rw-r--r--  xen/arch/x86/hvm/hvm.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7c3cb15053..bcf96093d4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1361,6 +1361,17 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
         }
     }
 
+    /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
+     * a fast path for LAPIC accesses, skipping the p2m lookup. */
+    if ( !nestedhvm_vcpu_in_guestmode(v)
+         && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
+    {
+        if ( !handle_mmio() )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        rc = 1;
+        goto out;
+    }
+
     p2m = p2m_get_hostp2m(v->domain);
     mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
                               P2M_ALLOC | (access_w ? P2M_UNSHARE : 0), NULL);
@@ -2471,6 +2482,12 @@ static enum hvm_copy_result __hvm_copy(
             gfn = addr >> PAGE_SHIFT;
         }
 
+        /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
+         * a fast path for LAPIC accesses, skipping the p2m lookup. */
+        if ( !nestedhvm_vcpu_in_guestmode(curr)
+             && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(curr))) )
+            return HVMCOPY_bad_gfn_to_mfn;
+
         page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
 
         if ( p2m_is_paging(p2mt) )
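Not part of the patch: a minimal standalone C sketch of the check both hunks add, namely comparing the faulting gfn against the page frame of the virtual LAPIC's MMIO window. PFN_DOWN, APIC_DEFAULT_PHYS_BASE and is_lapic_access() are local stand-ins defined here for illustration only; in Xen the page frame comes from vlapic_base_address(vcpu_vlapic(v)), which tracks the guest's APIC_BASE MSR rather than assuming the architectural default base.

/* Illustration only, not Xen code: the patch's guard reduces to a
 * single page-frame comparison. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT              12
#define PFN_DOWN(x)             ((x) >> PAGE_SHIFT)
#define APIC_DEFAULT_PHYS_BASE  0xfee00000UL   /* architectural default */

/* Would a fault at 'gpa' hit the (default-placed) LAPIC page? */
static int is_lapic_access(uint64_t gpa)
{
    return PFN_DOWN(gpa) == PFN_DOWN(APIC_DEFAULT_PHYS_BASE);
}

int main(void)
{
    /* 0xfee00080 is the LAPIC TPR, which 32-bit WinXP on AMD touches
     * on virtually every interrupt -- the motivation for the fast path. */
    printf("%d\n", is_lapic_access(0xfee00080UL));  /* 1: same page  */
    printf("%d\n", is_lapic_access(0xfec00000UL));  /* 0: IOAPIC page */
    return 0;
}

On a hit, hvm_hap_nested_page_fault() goes straight to handle_mmio() (injecting #GP on failure), while __hvm_copy() returns HVMCOPY_bad_gfn_to_mfn, since the LAPIC window is emulated MMIO and never backed by a RAM mfn; either way the comparatively expensive get_gfn_type_access() lookup is skipped. The nestedhvm_vcpu_in_guestmode() guard keeps the shortcut off while an L2 guest is running, presumably because the gfn there belongs to the L2 address space and the comparison against the L1 vLAPIC base would be meaningless.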