author     Jeremy Fitzhardinge <jeremy@xensource.com>   2007-07-17 18:37:03 -0700
committer  Jeremy Fitzhardinge <jeremy@goop.org>        2007-07-18 08:47:40 -0700
commit     fdb4c338c8d1d494e17c3422a3ea2129f6791596 (patch)
tree       08305dcbbf4179299f5d001796d658ff08ecc014 /arch
parent     810bab448e563ffd1718d78e9a3756806b626acc (diff)
paravirt: add an "mm" argument to alloc_pt
It's useful to know which mm is allocating a pagetable. Xen uses this to determine whether the pagetable being added to is pinned or not.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
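For context, the diffstat below is limited to arch/, so the interface change itself (in include/asm-i386/paravirt.h) is not shown here. The sketch that follows is illustrative only: the hook member, the wrapper, and the Xen-style backend are written from the commit message rather than taken from this patch, and mm_is_pinned()/xen_pin_pte_page() are hypothetical stand-ins for whatever pinning check Xen actually performs.

#include <linux/types.h>	/* u32 */

struct mm_struct;		/* only a pointer to the owning mm is needed here */

/* Illustrative sketch, not part of this diff: the alloc_pt hook gains an
 * mm argument so the backend can see which address space the new page
 * table belongs to. */
struct paravirt_ops {
	/* ... other hooks ... */
	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);  /* was: void (*alloc_pt)(u32 pfn) */
};

extern struct paravirt_ops paravirt_ops;

static inline void paravirt_alloc_pt(struct mm_struct *mm, u32 pfn)
{
	paravirt_ops.alloc_pt(mm, pfn);
}

/* Hypothetical Xen backend: if the mm's pagetables are already pinned
 * (registered read-only with the hypervisor), a freshly allocated page
 * table must be pinned too before it is hooked in. */
static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
{
	if (mm_is_pinned(mm))		/* hypothetical helper */
		xen_pin_pte_page(pfn);	/* hypothetical helper */
}

Backends that don't care can simply ignore the extra argument, which is why the VMI hook in the first hunk below takes mm without using it, while the bare-metal callers pass &init_mm.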
Diffstat (limited to 'arch')
 arch/i386/kernel/vmi.c  | 2 +-
 arch/i386/mm/init.c     | 2 +-
 arch/i386/mm/pageattr.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index c12720d7cbc5..234bd6ff518d 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -362,7 +362,7 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
-static void vmi_allocate_pt(u32 pfn)
+static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L1);
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7135946d3663..f9b6a8878547 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -87,7 +87,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 
-		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
+		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
 	}
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 2eb14a73be9c..37992ffb1633 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -60,7 +60,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	address = __pa(address);
 	addr = address & LARGE_PAGE_MASK;
 	pbase = (pte_t *)page_address(base);
-	paravirt_alloc_pt(page_to_pfn(base));
+	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
 		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
 					   addr == address ? prot : ref_prot));