Diffstat (limited to 'arch/ia64/mm/init.c')
-rw-r--r-- | arch/ia64/mm/init.c | 46
1 file changed, 39 insertions, 7 deletions
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..f225dd72968b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/a.out.h>
 #include <asm/dma.h>
@@ -67,7 +68,7 @@ max_pgt_pages(void)
 #ifndef CONFIG_NUMA
 	node_free_pages = nr_free_pages();
 #else
-	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+	node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
 #endif
 	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
 	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
@@ -156,9 +176,8 @@ ia64_init_addr_space (void)
 	 * the problem. When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -175,9 +194,8 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
-			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
@@ -595,13 +613,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
 	return 0;
 }
 
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
 int __init
 register_active_ranges(u64 start, u64 end, void *arg)
 {
-	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+	int nid = paddr_to_nid(__pa(start));
+
+	if (nid < 0)
+		nid = 0;
+#ifdef CONFIG_KEXEC
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
+	if (start < end)
+		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+			__pa(end) >> PAGE_SHIFT);
 	return 0;
 }
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 static int __init
 count_reserved_pages (u64 start, u64 end, void *arg)
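
The new dma_mark_clean() only marks pages that lie entirely within the DMA'd buffer: PAGE_ALIGN() rounds the start up to a page boundary, and the loop stops once the next full page would run past addr + size. A minimal userspace sketch of the same page-walk arithmetic (PAGE_SIZE and PAGE_ALIGN are redefined locally with an assumed 16 KiB ia64 page, and the flag update is replaced by a printf):

#include <stdio.h>

#define PAGE_SIZE	16384UL	/* assumed: a common ia64 page size */
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Walk the pages wholly contained in [addr, addr + size). */
static void mark_clean_demo(unsigned long addr, unsigned long size)
{
	unsigned long pg_addr = PAGE_ALIGN(addr);
	unsigned long end = addr + size;

	while (pg_addr + PAGE_SIZE <= end) {
		printf("would mark page at 0x%lx clean\n", pg_addr);
		pg_addr += PAGE_SIZE;
	}
}

int main(void)
{
	/* Buffer starting mid-page: the partial head and tail pages are
	 * skipped; only the two full pages in between get marked. */
	mark_clean_demo(0x10000 + 100, 3 * PAGE_SIZE);
	return 0;
}

Only pages wholly inside the buffer are marked; a partial head or tail page may still hold unrelated code and must keep its normal flush-before-execute behaviour, which is what the "(complete) pages" wording in the new comment is getting at.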
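The register_active_ranges() hunk trims each memory range so the crash-kernel reservation is never handed to the page allocator: a range starting inside [crashk_res.start, crashk_res.end] is bumped up to the end of the reservation, a range ending inside it is cut back to its start, and only a non-empty remainder is registered. A standalone sketch of that clipping rule (struct res and clip_range() are hypothetical stand-ins for struct resource and the open-coded hunk; the values are made up):

#include <stdio.h>

struct res { unsigned long start, end; };

/* Clip [*start, *end) against a reserved region, mirroring the
 * CONFIG_KEXEC hunk: trim whichever endpoint falls inside it. */
static void clip_range(unsigned long *start, unsigned long *end,
		       const struct res *reserved)
{
	if (*start > reserved->start && *start < reserved->end)
		*start = reserved->end;
	if (*end > reserved->start && *end < reserved->end)
		*end = reserved->start;
}

int main(void)
{
	struct res crashk = { .start = 0x4000000, .end = 0x8000000 };
	unsigned long start = 0x6000000, end = 0xc000000;

	clip_range(&start, &end, &crashk);
	if (start < end)	/* register only what is left */
		printf("register 0x%lx-0x%lx\n", start, end);
	return 0;
}

As in the hunk itself, a range that entirely contains the reservation is left whole by this rule, since neither endpoint falls strictly inside the reserved region.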
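Both ia64_init_addr_space() hunks replace an open-coded kmem_cache_alloc() + memset() pair with kmem_cache_zalloc(), which hands back an already-zeroed object. Roughly, and only as a hypothetical illustration (zalloc_equiv() is not a real kernel function; the in-tree helper is provided by the slab allocator itself), the conversion relies on this equivalence:

#include <linux/slab.h>

/* Hypothetical: what kmem_cache_zalloc(cachep, flags) amounts to
 * from the caller's point of view. */
static inline void *zalloc_equiv(struct kmem_cache *cachep, gfp_t flags)
{
	void *obj = kmem_cache_alloc(cachep, flags);

	if (obj)
		memset(obj, 0, kmem_cache_size(cachep));
	return obj;
}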