author	Susheel Khiani <skhiani@codeaurora.org>	2015-09-08 15:05:43 +0530
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-22 11:03:58 -0700
commit	c064333eace6def3291fa4d07be9eed6654ccea3 (patch)
tree	7ac57391f9f6daee3dfddf5a36bbc459b4f89ce7 /arch/arm/mm
parent	4af3c048cfad82e108db8c4565e9c268dbc1ca18 (diff)
msm: Allow lowmem to be non contiguous and mixed
Currently on 32-bit systems, the virtual space above PAGE_OFFSET is reserved for direct-mapped lowmem, and part of the virtual address space is reserved for vmalloc. We want as much direct-mapped memory as possible, since there is a penalty for mapping and unmapping highmem.

Now, we may have an image that is expected to live for the lifetime of the system and is reserved in a physical region that would otherwise be part of direct-mapped lowmem. The physical memory reserved this way is never used by Linux. This means that even though the system never accesses the virtual memory corresponding to the reserved physical memory, we still lose that portion of the direct-mapped lowmem address space.

By allowing lowmem to be non-contiguous, we can give the unused virtual address space of the reserved region back to vmalloc.

Change-Id: I980b3dfafac71884dcdcb8cd2e4a6363cde5746a
Signed-off-by: Susheel Khiani <skhiani@codeaurora.org>
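As a hedged illustration of the scenario described above (the carveout base, size, and function name below are invented for this sketch and are not taken from the patch), a platform could remove a long-lived image region from the "memory" memblock so it is never direct-mapped; with this change, the virtual hole left behind can be handed back to vmalloc instead of being wasted:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* Illustrative values only; not part of this patch. */
#define EXAMPLE_FW_BASE	0x88000000UL	/* assumed physical base of the carveout */
#define EXAMPLE_FW_SIZE	SZ_64M		/* assumed size of the long-lived image  */

static void __init example_reserve_fw_carveout(void)
{
	/*
	 * Removing the range from the "memory" memblock makes lowmem
	 * non-contiguous: map_lowmem() maps the remaining banks around
	 * the hole, and the matching virtual range is free for vmalloc.
	 */
	memblock_remove(EXAMPLE_FW_BASE, EXAMPLE_FW_SIZE);
}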
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/ioremap.c	 3
-rw-r--r--	arch/arm/mm/mmu.c	40
2 files changed, 40 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0c81056c1dd7..7ffe365b5f03 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -91,7 +91,8 @@ void __init add_static_vm_early(struct static_vm *svm)
 	void *vaddr;
 
 	vm = &svm->vm;
-	vm_area_add_early(vm);
+	if (!vm_area_check_early(vm))
+		vm_area_add_early(vm);
 	vaddr = vm->addr;
 
 	list_for_each_entry(curr_svm, &static_vmlist, list) {
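The guard above depends on vm_area_check_early(), which is introduced elsewhere in this patch series (alongside vm_area_add_early() in mm/vmalloc.c) and is not shown in this diff. A minimal sketch of the overlap test it presumably performs, written against a caller-supplied early list rather than vmalloc.c's private vmlist so the snippet stays self-contained:

#include <linux/init.h>
#include <linux/vmalloc.h>

/*
 * Hedged sketch only, not the implementation added by this series:
 * return 1 if @vm overlaps any area already queued on the early,
 * pre-vmalloc_init() list built through vm_struct::next, 0 otherwise.
 */
static int __init example_vm_area_check_early(struct vm_struct *early_list,
					      struct vm_struct *vm)
{
	struct vm_struct *tmp;

	for (tmp = early_list; tmp; tmp = tmp->next) {
		const char *tmp_start = tmp->addr;
		const char *vm_start = vm->addr;

		if (tmp_start >= vm_start) {
			/* tmp begins at or after vm: overlap if it begins
			 * before vm ends. */
			if (tmp_start < vm_start + vm->size)
				return 1;
		} else {
			/* tmp begins before vm: overlap if it runs past
			 * vm's start. */
			if (tmp_start + tmp->size > vm_start)
				return 1;
		}
	}
	return 0;	/* no overlap: safe to call vm_area_add_early() */
}

Skipping vm_area_add_early() when the check reports an overlap presumably keeps a range that is already on the early list from being added a second time.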
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4867f5daf82c..76504f896696 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1385,12 +1385,21 @@ static void __init map_lowmem(void)
 	struct memblock_region *reg;
 	phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	struct static_vm *svm;
+	phys_addr_t start;
+	phys_addr_t end;
+	unsigned long vaddr;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+	int nr = 0;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
 		struct map_desc map;
 
+		start = reg->base;
+		end = start + reg->size;
+		nr++;
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
@@ -1439,6 +1448,33 @@ static void __init map_lowmem(void)
 			}
 		}
 	}
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
+
+	for_each_memblock(memory, reg) {
+		struct vm_struct *vm;
+
+		start = reg->base;
+		end = start + reg->size;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		vm = &svm->vm;
+		pfn = __phys_to_pfn(start);
+		vaddr = __phys_to_virt(start);
+		length = end - start;
+		type = MT_MEMORY_RW;
+
+		vm->addr = (void *)(vaddr & PAGE_MASK);
+		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(pfn);
+		vm->flags = VM_LOWMEM;
+		vm->flags |= VM_ARM_MTYPE(type);
+		vm->caller = map_lowmem;
+		add_static_vm_early(svm++);
+	}
 }
 
 #ifdef CONFIG_ARM_PV_FIXUP
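The new loop registers each direct-mapped bank with add_static_vm_early() and tags it VM_LOWMEM, so the static vm list now describes exactly which parts of the lowmem virtual range are really backed by direct mappings. As a hedged illustration only (the helper below is invented; struct static_vm, static_vmlist and VM_LOWMEM are the identifiers the diff itself uses, and the "mm.h" include assumes they are visible there as in arch/arm/mm), code in arch/arm/mm could then tell mapped lowmem apart from the holes returned to vmalloc:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "mm.h"	/* assumed to expose struct static_vm, static_vmlist, VM_LOWMEM */

/*
 * Hedged sketch, not part of this patch: report whether @vaddr lies in a
 * direct-mapped lowmem bank registered by map_lowmem(). Regions carved
 * out of lowmem never appear on the list, so their virtual range stays
 * available to vmalloc.
 */
static bool __init example_addr_is_mapped_lowmem(const void *vaddr)
{
	const char *p = vaddr;
	struct static_vm *svm;

	list_for_each_entry(svm, &static_vmlist, list) {
		struct vm_struct *vm = &svm->vm;
		const char *base = vm->addr;

		if (!(vm->flags & VM_LOWMEM))
			continue;
		if (p >= base && p < base + vm->size)
			return true;
	}
	return false;
}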