author     David Vrabel <david.vrabel@citrix.com>   2015-03-11 14:49:57 +0000
committer  David Vrabel <david.vrabel@citrix.com>   2015-03-16 14:49:15 +0000
commit     4e8c0c8c4bf3a5b5c98046e146ab3884bf7a7d0e (patch)
tree       e2da5980fd405c7109a342b13f0a2a1214f94b61 /drivers/xen/xlate_mmu.c
parent     628c28eefd6f2cef03b212081b466ae43fd093a3 (diff)
xen/privcmd: improve performance of MMAPBATCH_V2
Make the IOCTL_PRIVCMD_MMAPBATCH_V2 (and older V1 version) map
multiple frames at a time rather than one at a time, even when the
frames' GFNs are not consecutive.
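For context, the userspace side of the call being optimized looks roughly
like this (a minimal sketch, assuming the privcmd UAPI of this era from
<xen/privcmd.h>; map_foreign_batch() and its arguments are illustrative
placeholders, not part of this patch):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h>

/* Ask privcmd to map num foreign frames from domain dom in ONE ioctl.
 * The GFNs in gfns[] need not be consecutive; per-frame error codes
 * come back in err[].  fd is an open "/dev/xen/privcmd". */
static void *map_foreign_batch(int fd, domid_t dom, unsigned int num,
                               const xen_pfn_t *gfns, int *err)
{
        /* 4 KiB pages assumed for brevity */
        void *addr = mmap(NULL, (size_t)num << 12, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED)
                return NULL;

        struct privcmd_mmapbatch_v2 batch = {
                .num  = num,
                .dom  = dom,
                .addr = (uint64_t)(uintptr_t)addr,
                .arr  = gfns,
                .err  = err,
        };
        if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch) < 0) {
                munmap(addr, (size_t)num << 12);
                return NULL;
        }
        return addr;
}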
xen_remap_foreign_mfn_array() is added, which maps an array of GFNs
(instead of a consecutive range of GFNs).
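The xlate_mmu backend for that wrapper changes shape accordingly; for
quick reference, here are the before and after prototypes exactly as they
appear in the diff below:

/* Before: a consecutive GFN range (and the sole caller only ever
 * passed nr == 1). */
int xen_xlate_remap_gfn_range(struct vm_area_struct *vma,
                              unsigned long addr,
                              xen_pfn_t gfn, int nr,
                              pgprot_t prot, unsigned domid,
                              struct page **pages);

/* After: an array of arbitrary GFNs plus a per-frame error array.
 * Returns the number of frames successfully mapped, or a negative
 * errno if the page-table walk itself failed. */
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
                              unsigned long addr,
                              xen_pfn_t *mfn, int nr,
                              int *err_ptr, pgprot_t prot,
                              unsigned domid,
                              struct page **pages);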
Since per-frame errors are returned in an array, privcmd must set the
MMAPBATCH_V1 error bits as part of the "report errors" phase, after
all the frames are mapped.
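A sketch of what that deferred "report errors" pass amounts to (not the
literal privcmd code; PRIVCMD_MMAPBATCH_MFN_ERROR and
PRIVCMD_MMAPBATCH_PAGED_ERROR are the error bits privcmd defines for V1,
while encode_v1_errors() is a hypothetical helper):

/* V1 has no separate error array: it reports failures by setting high
 * bits in the caller's own GFN array.  Now that mapping is batched and
 * per-frame results land in err[], these bits can only be written back
 * once the whole batch has been attempted. */
static int encode_v1_errors(const int *err, xen_pfn_t __user *user_gfn,
                            unsigned int nr)
{
        unsigned int i;

        for (i = 0; i < nr; i++, user_gfn++) {
                xen_pfn_t gfn;

                if (!err[i])
                        continue;
                if (get_user(gfn, user_gfn))
                        return -EFAULT;
                gfn |= (err[i] == -ENOENT) ?
                        PRIVCMD_MMAPBATCH_PAGED_ERROR :
                        PRIVCMD_MMAPBATCH_MFN_ERROR;
                if (put_user(gfn, user_gfn))
                        return -EFAULT;
        }
        return 0;
}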
Migration times are significantly improved (when using a PV toolstack
domain). For example, for an idle 12 GiB PV guest:
          Before      After
real      0m38.179s   0m26.868s
user      0m15.096s   0m13.652s
sys       0m28.988s   0m18.732s
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Diffstat (limited to 'drivers/xen/xlate_mmu.c')
-rw-r--r--  drivers/xen/xlate_mmu.c  46
1 file changed, 28 insertions, 18 deletions
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 7724d90fc697..58a5389aec89 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -62,13 +62,15 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 }
 
 struct remap_data {
-        xen_pfn_t fgmfn; /* foreign domain's gmfn */
+        xen_pfn_t *fgmfn; /* foreign domain's gmfn */
         pgprot_t prot;
         domid_t domid;
         struct vm_area_struct *vma;
         int index;
         struct page **pages;
         struct xen_remap_mfn_info *info;
+        int *err_ptr;
+        int mapped;
 };
 
 static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
@@ -80,38 +82,46 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
         pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
         int rc;
 
-        rc = map_foreign_page(pfn, info->fgmfn, info->domid);
-        if (rc < 0)
-                return rc;
-        set_pte_at(info->vma->vm_mm, addr, ptep, pte);
+        rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+        *info->err_ptr++ = rc;
+        if (!rc) {
+                set_pte_at(info->vma->vm_mm, addr, ptep, pte);
+                info->mapped++;
+        }
+        info->fgmfn++;
 
         return 0;
 }
 
-int xen_xlate_remap_gfn_range(struct vm_area_struct *vma,
+int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
-                              xen_pfn_t gfn, int nr,
-                              pgprot_t prot, unsigned domid,
+                              xen_pfn_t *mfn, int nr,
+                              int *err_ptr, pgprot_t prot,
+                              unsigned domid,
                               struct page **pages)
 {
         int err;
         struct remap_data data;
+        unsigned long range = nr << PAGE_SHIFT;
 
-        /* TBD: Batching, current sole caller only does page at a time */
-        if (nr > 1)
-                return -EINVAL;
+        /* Kept here for the purpose of making sure code doesn't break
+           x86 PVOPS */
+        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
-        data.fgmfn = gfn;
-        data.prot  = prot;
+        data.fgmfn = mfn;
+        data.prot = prot;
         data.domid = domid;
-        data.vma   = vma;
-        data.index = 0;
+        data.vma = vma;
         data.pages = pages;
-        err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+        data.index = 0;
+        data.err_ptr = err_ptr;
+        data.mapped = 0;
+
+        err = apply_to_page_range(vma->vm_mm, addr, range,
                                   remap_pte_fn, &data);
-        return err;
+        return err < 0 ? err : data.mapped;
 }
-EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_range);
+EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
 
 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, int nr,
                               struct page **pages)
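Note the new return convention at the end of the hunk: a negative errno if
the page-table walk itself fails, otherwise data.mapped, the number of
frames whose PTEs were actually installed. A hypothetical caller
(map_and_report() is illustrative, not from this patch) would consume it
together with the err_ptr array like this:

static int map_and_report(struct vm_area_struct *vma, unsigned long addr,
                          xen_pfn_t *gfns, int *err, int nr,
                          unsigned domid, struct page **pages)
{
        int mapped = xen_xlate_remap_gfn_array(vma, addr, gfns, nr, err,
                                               vma->vm_page_prot, domid,
                                               pages);
        if (mapped < 0)
                return mapped;          /* the walk itself failed */

        /* Individual frames may still have failed; exactly nr - mapped
         * of the entries in err[0..nr-1] are non-zero. */
        if (mapped < nr)
                pr_debug("xen: %d of %d frames failed to map\n",
                         nr - mapped, nr);
        return 0;
}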