author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-06-25 00:19:20 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-08 13:11:08 +0200 |
commit | 7c934d3990aa4d785feddcef700f4c2c4aba2251 (patch) | |
tree | 6fb0b00811039239a585e256791d2cf80149cc96 | |
parent | 4f9c11dd49fb73e1ec088b27ed6539681a445988 (diff) | |
x86, 64-bit: create small vmemmap mappings if PSE not available
If PSE is not available, then fall back to 4k page mappings for the
vmemmap area.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | arch/x86/mm/init_64.c | 62 |
1 file changed, 40 insertions, 22 deletions
```diff
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 363751dc3fb8..e62cbdd4e2d9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -988,7 +988,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
 	pmd_t *pmd;
 
 	for (; addr < end; addr = next) {
-		next = pmd_addr_end(addr, end);
+		void *p = NULL;
 
 		pgd = vmemmap_pgd_populate(addr, node);
 		if (!pgd)
@@ -998,33 +998,51 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
 		if (!pud)
 			return -ENOMEM;
 
-		pmd = pmd_offset(pud, addr);
-		if (pmd_none(*pmd)) {
-			pte_t entry;
-			void *p;
+		if (!cpu_has_pse) {
+			next = (addr + PAGE_SIZE) & PAGE_MASK;
+			pmd = vmemmap_pmd_populate(pud, addr, node);
+
+			if (!pmd)
+				return -ENOMEM;
+
+			p = vmemmap_pte_populate(pmd, addr, node);
 
-			p = vmemmap_alloc_block(PMD_SIZE, node);
 			if (!p)
 				return -ENOMEM;
 
-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-					PAGE_KERNEL_LARGE);
-			set_pmd(pmd, __pmd(pte_val(entry)));
-
-			/* check to see if we have contiguous blocks */
-			if (p_end != p || node_start != node) {
-				if (p_start)
-					printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
-						addr_start, addr_end-1, p_start, p_end-1, node_start);
-				addr_start = addr;
-				node_start = node;
-				p_start = p;
-			}
-			addr_end = addr + PMD_SIZE;
-			p_end = p + PMD_SIZE;
+			addr_end = addr + PAGE_SIZE;
+			p_end = p + PAGE_SIZE;
 		} else {
-			vmemmap_verify((pte_t *)pmd, node, addr, next);
+			next = pmd_addr_end(addr, end);
+
+			pmd = pmd_offset(pud, addr);
+			if (pmd_none(*pmd)) {
+				pte_t entry;
+
+				p = vmemmap_alloc_block(PMD_SIZE, node);
+				if (!p)
+					return -ENOMEM;
+
+				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+						PAGE_KERNEL_LARGE);
+				set_pmd(pmd, __pmd(pte_val(entry)));
+
+				addr_end = addr + PMD_SIZE;
+				p_end = p + PMD_SIZE;
+
+				/* check to see if we have contiguous blocks */
+				if (p_end != p || node_start != node) {
+					if (p_start)
+						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+							addr_start, addr_end-1, p_start, p_end-1, node_start);
+					addr_start = addr;
+					node_start = node;
+					p_start = p;
+				}
+			} else
+				vmemmap_verify((pte_t *)pmd, node, addr, next);
 		}
+
 	}
 	return 0;
 }
```
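For readers who want the resulting control flow rather than the interleaved hunks, the per-iteration body of vmemmap_populate() after this patch boils down to the sketch below. It is a condensed paraphrase of the diff above, not a verbatim copy of the file: the pud setup between the two hunks is reconstructed from the surrounding context, and the contiguous-block debug bookkeeping (addr_start/addr_end/p_start/p_end/node_start and the printk) is omitted for brevity.

```c
/*
 * Condensed sketch of the post-patch vmemmap_populate() loop body,
 * paraphrased from the diff above (debug bookkeeping omitted).
 */
for (; addr < end; addr = next) {
	void *p = NULL;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return -ENOMEM;
	pud = vmemmap_pud_populate(pgd, addr, node);	/* between the hunks; reconstructed */
	if (!pud)
		return -ENOMEM;

	if (!cpu_has_pse) {
		/* No PSE: cover the vmemmap with individual 4k pte mappings. */
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		p = vmemmap_pte_populate(pmd, addr, node);
		if (!p)
			return -ENOMEM;
	} else {
		/* PSE available: keep the original PMD-sized (2M) large-page mappings. */
		next = pmd_addr_end(addr, end);
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			pte_t entry;

			p = vmemmap_alloc_block(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;
			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
			set_pmd(pmd, __pmd(pte_val(entry)));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	}
}
return 0;
```

The essential difference between the two paths is the stride: next advances one 4k page at a time when PSE is absent, and to the next PMD boundary when 2M pages can be used, so the same outer loop serves both mapping granularities.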