From 7c732cba3d9312882e82d91d5948261dfd5c8fe6 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 24 Dec 2013 15:12:03 +0800 Subject: powerpc/fsl_booke: protect the access to MAS7 The e500v1 doesn't implement the MAS7, so we should avoid to access this register on that implementations. In the current kernel, the access to MAS7 are protected by either CONFIG_PHYS_64BIT or MMU_FTR_BIG_PHYS. Since some code are executed before the code patching, we have to use CONFIG_PHYS_64BIT in these cases. Signed-off-by: Kevin Hao Signed-off-by: Scott Wood --- arch/powerpc/mm/hugetlbpage-book3e.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index 74551b5e41e5..646c4bffaeba 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c @@ -103,7 +103,8 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) { mtspr(SPRN_MAS7_MAS3, mas7_3); } else { - mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); + if (mmu_has_feature(MMU_FTR_BIG_PHYS)) + mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); mtspr(SPRN_MAS3, lower_32_bits(mas7_3)); } -- cgit v1.2.3 From dd189692d40948d6445bbaeb8cb9bf9d15f54dc6 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 24 Dec 2013 15:12:06 +0800 Subject: powerpc: enable the relocatable support for the fsl booke 32bit kernel This is based on the codes in the head_44x.S. The difference is that the init tlb size we used is 64M. With this patch we can only load the kernel at address between memstart_addr ~ memstart_addr + 64M. We will fix this restriction in the following patches. Signed-off-by: Kevin Hao Signed-off-by: Scott Wood --- arch/powerpc/Kconfig | 2 +- arch/powerpc/kernel/fsl_booke_entry_mapping.S | 2 ++ arch/powerpc/kernel/head_fsl_booke.S | 34 +++++++++++++++++++++++++++ arch/powerpc/mm/fsl_booke_mmu.c | 28 ++++++++++++++++++++++ 4 files changed, 65 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f0a893142cee..4bb52e6488ea 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -884,7 +884,7 @@ config DYNAMIC_MEMSTART config RELOCATABLE bool "Build a relocatable kernel" - depends on ADVANCED_OPTIONS && FLATMEM && 44x + depends on ADVANCED_OPTIONS && FLATMEM && (44x || FSL_BOOKE) select NONSTATIC_KERNEL help This builds a kernel image that is capable of running at the diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S index a92c79be2728..f22e7e44fbf3 100644 --- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S +++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S @@ -176,6 +176,8 @@ skpinv: addi r6,r6,1 /* Increment */ /* 7. 
Jump to KERNELBASE mapping */ lis r6,(KERNELBASE & ~0xfff)@h ori r6,r6,(KERNELBASE & ~0xfff)@l + rlwinm r7,r25,0,0x03ffffff + add r6,r7,r6 #elif defined(ENTRY_MAPPING_KEXEC_SETUP) /* diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 196950f29c00..19bd574bda9d 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -73,6 +73,30 @@ _ENTRY(_start); li r24,0 /* CPU number */ li r23,0 /* phys kernel start (high) */ +#ifdef CONFIG_RELOCATABLE + LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */ + + /* Translate _stext address to physical, save in r23/r25 */ + bl get_phys_addr + mr r23,r3 + mr r25,r4 + + /* + * We have the runtime (virutal) address of our base. + * We calculate our shift of offset from a 64M page. + * We could map the 64M page we belong to at PAGE_OFFSET and + * get going from there. + */ + lis r4,KERNELBASE@h + ori r4,r4,KERNELBASE@l + rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */ + rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */ + subf r3,r5,r6 /* r3 = r6 - r5 */ + add r3,r4,r3 /* Required Virtual Address */ + + bl relocate +#endif + /* We try to not make any assumptions about how the boot loader * setup or used the TLBs. We invalidate all mappings from the * boot loader and load a single entry in TLB1[0] to map the @@ -182,6 +206,16 @@ _ENTRY(__early_start) bl early_init +#ifdef CONFIG_RELOCATABLE +#ifdef CONFIG_PHYS_64BIT + mr r3,r23 + mr r4,r25 +#else + mr r3,r25 +#endif + bl relocate_init +#endif + #ifdef CONFIG_DYNAMIC_MEMSTART lis r3,kernstart_addr@ha la r3,kernstart_addr@l(r3) diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index 07ba45b0f07c..ce4a1163ddd3 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -241,4 +241,32 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, /* 64M mapped initially according to head_fsl_booke.S */ memblock_set_current_limit(min_t(u64, limit, 0x04000000)); } + +#ifdef CONFIG_RELOCATABLE +notrace void __init relocate_init(phys_addr_t start) +{ + unsigned long base = KERNELBASE; + + /* + * Relocatable kernel support based on processing of dynamic + * relocation entries. + * Compute the virt_phys_offset : + * virt_phys_offset = stext.run - kernstart_addr + * + * stext.run = (KERNELBASE & ~0x3ffffff) + (kernstart_addr & 0x3ffffff) + * When we relocate, we have : + * + * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) + * + * hence: + * virt_phys_offset = (KERNELBASE & ~0x3ffffff) - + * (kernstart_addr & ~0x3ffffff) + * + */ + kernstart_addr = start; + start &= ~0x3ffffff; + base &= ~0x3ffffff; + virt_phys_offset = base - start; +} +#endif #endif -- cgit v1.2.3 From 78a235efdc42ff363de81fdbc171385e8b86b69b Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 24 Dec 2013 15:12:07 +0800 Subject: powerpc/fsl_booke: set the tlb entry for the kernel address in AS1 We use the tlb1 entries to map low mem to the kernel space. In the current code, it assumes that the first tlb entry would cover the kernel image. But this is not true for some special cases, such as when we run a relocatable kernel above the 64M or set CONFIG_KERNEL_START above 64M. So we choose to switch to address space 1 before setting these tlb entries. 
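As a rough illustration of why that assumption breaks (plain C with made-up addresses, not part of the patch): the init CAM entries map lowmem upward from memstart_addr, so a kernel running above 64M physical is simply not covered by the first entry, and rewriting that entry while still executing from address space 0 would drop the mapping out from under the running code.

	#include <stdio.h>

	int main(void)
	{
		unsigned long memstart  = 0x00000000;	/* physical start of RAM */
		unsigned long cam0_size = 0x04000000;	/* 64M init mapping */
		unsigned long kernstart = 0x05000000;	/* relocatable kernel at 80M */

		if (kernstart >= memstart + cam0_size)
			printf("kernel at %#lx lies outside CAM0 [%#lx, %#lx): "
			       "the first TLB entry does not cover it\n",
			       kernstart, memstart, memstart + cam0_size);
		return 0;
	}

Switching to address space 1 first means the AS0 entries can be rewritten without pulling the translation away from the code doing the rewrite.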
Signed-off-by: Kevin Hao Signed-off-by: Scott Wood --- arch/powerpc/kernel/head_fsl_booke.S | 81 ++++++++++++++++++++++++++++++++++++ arch/powerpc/mm/fsl_booke_mmu.c | 2 + arch/powerpc/mm/mmu_decl.h | 2 + 3 files changed, 85 insertions(+) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 19bd574bda9d..75f0223e6d0d 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -1156,6 +1156,87 @@ __secondary_hold_acknowledge: .long -1 #endif +/* + * Create a tlb entry with the same effective and physical address as + * the tlb entry used by the current running code. But set the TS to 1. + * Then switch to the address space 1. It will return with the r3 set to + * the ESEL of the new created tlb. + */ +_GLOBAL(switch_to_as1) + mflr r5 + + /* Find a entry not used */ + mfspr r3,SPRN_TLB1CFG + andi. r3,r3,0xfff + mfspr r4,SPRN_PID + rlwinm r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */ + mtspr SPRN_MAS6,r4 +1: lis r4,0x1000 /* Set MAS0(TLBSEL) = 1 */ + addi r3,r3,-1 + rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r4 + tlbre + mfspr r4,SPRN_MAS1 + andis. r4,r4,MAS1_VALID@h + bne 1b + + /* Get the tlb entry used by the current running code */ + bl 0f +0: mflr r4 + tlbsx 0,r4 + + mfspr r4,SPRN_MAS1 + ori r4,r4,MAS1_TS /* Set the TS = 1 */ + mtspr SPRN_MAS1,r4 + + mfspr r4,SPRN_MAS0 + rlwinm r4,r4,0,~MAS0_ESEL_MASK + rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r4 + tlbwe + isync + sync + + mfmsr r4 + ori r4,r4,MSR_IS | MSR_DS + mtspr SPRN_SRR0,r5 + mtspr SPRN_SRR1,r4 + sync + rfi + +/* + * Restore to the address space 0 and also invalidate the tlb entry created + * by switch_to_as1. +*/ +_GLOBAL(restore_to_as0) + mflr r0 + + bl 0f +0: mflr r9 + addi r9,r9,1f - 0b + + mfmsr r7 + li r8,(MSR_IS | MSR_DS) + andc r7,r7,r8 + + mtspr SPRN_SRR0,r9 + mtspr SPRN_SRR1,r7 + sync + rfi + + /* Invalidate the temporary tlb entry for AS1 */ +1: lis r9,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r9,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r9 + tlbre + mfspr r9,SPRN_MAS1 + rlwinm r9,r9,0,2,31 /* Clear MAS1 Valid and IPPROT */ + mtspr SPRN_MAS1,r9 + tlbwe + isync + mtlr r0 + blr + /* * We put a few things here that have to be page-aligned. This stuff * goes at the beginning of the data segment, which is page-aligned. 
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index ce4a1163ddd3..1d54f6d35e71 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -222,7 +222,9 @@ void __init adjust_total_lowmem(void) /* adjust lowmem size to __max_low_memory */ ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); + i = switch_to_as1(); __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); + restore_to_as0(i); pr_info("Memory CAM mapping: "); for (i = 0; i < tlbcam_index - 1; i++) diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 83eb5d5f53d5..eefbf7bb4331 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -148,6 +148,8 @@ extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(unsigned long top); extern void adjust_total_lowmem(void); +extern int switch_to_as1(void); +extern void restore_to_as0(int esel); #endif extern void loadcam_entry(unsigned int index); -- cgit v1.2.3 From 813125d83372e19edecaba811d4d0dc115d36819 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 24 Dec 2013 15:12:09 +0800 Subject: powerpc/fsl_booke: introduce map_mem_in_cams_addr Introduce this function so we can set both the physical and virtual address for the map in cams. This will be used by the relocation code. Signed-off-by: Kevin Hao Signed-off-by: Scott Wood --- arch/powerpc/mm/fsl_booke_mmu.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index 1d54f6d35e71..ca956c83e3a2 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -171,11 +171,10 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, return 1UL << camsize; } -unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) +static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, + unsigned long ram, int max_cam_idx) { int i; - unsigned long virt = PAGE_OFFSET; - phys_addr_t phys = memstart_addr; unsigned long amount_mapped = 0; /* Calculate CAM values */ @@ -195,6 +194,14 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) return amount_mapped; } +unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) +{ + unsigned long virt = PAGE_OFFSET; + phys_addr_t phys = memstart_addr; + + return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx); +} + #ifdef CONFIG_PPC32 #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) -- cgit v1.2.3 From 7d2471f9fa85089beb1cb9436ffc28f9e11e518d Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 24 Dec 2013 15:12:10 +0800 Subject: powerpc/fsl_booke: make sure PAGE_OFFSET map to memstart_addr for relocatable kernel This is always true for a non-relocatable kernel. Otherwise the kernel would get stuck. But for a relocatable kernel, it seems a little complicated. When booting a relocatable kernel, we just align the kernel start addr to 64M and map the PAGE_OFFSET from there. The relocation will base on this virtual address. But if this address is not the same as the memstart_addr, we will have to change the map of PAGE_OFFSET to the real memstart_addr and do another relocation again. 
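The decision described above reduces to a little arithmetic; a minimal C sketch (constants and helper names are illustrative -- the real work is split between head_fsl_booke.S and relocate_init() in the diff below):

	#define SZ_64M		0x04000000UL
	#define KERNELBASE	0xc0000000UL	/* assumed typical 32-bit value */

	/* Stage 1: before the device tree is parsed, map PAGE_OFFSET to the
	 * kernel's own 64M-aligned page and relocate against that. */
	static unsigned long stage1_virt_phys_offset(unsigned long kernstart)
	{
		return (KERNELBASE & ~(SZ_64M - 1)) - (kernstart & ~(SZ_64M - 1));
	}

	/* Stage 2: once memstart_addr is known from the device tree,
	 * PAGE_OFFSET must map memstart_addr; if the stage-1 mapping points
	 * elsewhere, switch the map and relocate a second time. */
	static int needs_second_relocation(unsigned long kernstart,
					   unsigned long memstart_addr)
	{
		return (kernstart & ~(SZ_64M - 1)) != memstart_addr;
	}
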
Signed-off-by: Kevin Hao [scottwood@freescale.com: make offset long and non-negative in simple case] Signed-off-by: Scott Wood --- arch/powerpc/kernel/head_fsl_booke.S | 74 +++++++++++++++++++++++++++++++++--- arch/powerpc/mm/fsl_booke_mmu.c | 42 +++++++++++++++++--- arch/powerpc/mm/mmu_decl.h | 2 +- 3 files changed, 106 insertions(+), 12 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 75f0223e6d0d..b1f7edc3c360 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -81,6 +81,39 @@ _ENTRY(_start); mr r23,r3 mr r25,r4 + bl 0f +0: mflr r8 + addis r3,r8,(is_second_reloc - 0b)@ha + lwz r19,(is_second_reloc - 0b)@l(r3) + + /* Check if this is the second relocation. */ + cmpwi r19,1 + bne 1f + + /* + * For the second relocation, we already get the real memstart_addr + * from device tree. So we will map PAGE_OFFSET to memstart_addr, + * then the virtual address of start kernel should be: + * PAGE_OFFSET + (kernstart_addr - memstart_addr) + * Since the offset between kernstart_addr and memstart_addr should + * never be beyond 1G, so we can just use the lower 32bit of them + * for the calculation. + */ + lis r3,PAGE_OFFSET@h + + addis r4,r8,(kernstart_addr - 0b)@ha + addi r4,r4,(kernstart_addr - 0b)@l + lwz r5,4(r4) + + addis r6,r8,(memstart_addr - 0b)@ha + addi r6,r6,(memstart_addr - 0b)@l + lwz r7,4(r6) + + subf r5,r7,r5 + add r3,r3,r5 + b 2f + +1: /* * We have the runtime (virutal) address of our base. * We calculate our shift of offset from a 64M page. @@ -94,7 +127,14 @@ _ENTRY(_start); subf r3,r5,r6 /* r3 = r6 - r5 */ add r3,r4,r3 /* Required Virtual Address */ - bl relocate +2: bl relocate + + /* + * For the second relocation, we already set the right tlb entries + * for the kernel space, so skip the code in fsl_booke_entry_mapping.S + */ + cmpwi r19,1 + beq set_ivor #endif /* We try to not make any assumptions about how the boot loader @@ -122,6 +162,7 @@ _ENTRY(__early_start) #include "fsl_booke_entry_mapping.S" #undef ENTRY_MAPPING_BOOT_SETUP +set_ivor: /* Establish the interrupt vector offsets */ SET_IVOR(0, CriticalInput); SET_IVOR(1, MachineCheck); @@ -207,11 +248,13 @@ _ENTRY(__early_start) bl early_init #ifdef CONFIG_RELOCATABLE + mr r3,r30 + mr r4,r31 #ifdef CONFIG_PHYS_64BIT - mr r3,r23 - mr r4,r25 + mr r5,r23 + mr r6,r25 #else - mr r3,r25 + mr r5,r25 #endif bl relocate_init #endif @@ -1207,6 +1250,9 @@ _GLOBAL(switch_to_as1) /* * Restore to the address space 0 and also invalidate the tlb entry created * by switch_to_as1. + * r3 - the tlb entry which should be invalidated + * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) + * r5 - device tree virtual address. If r4 is 0, r5 is ignored. */ _GLOBAL(restore_to_as0) mflr r0 @@ -1215,7 +1261,15 @@ _GLOBAL(restore_to_as0) 0: mflr r9 addi r9,r9,1f - 0b - mfmsr r7 + /* + * We may map the PAGE_OFFSET in AS0 to a different physical address, + * so we need calculate the right jump and device tree address based + * on the offset passed by r4. + */ + add r9,r9,r4 + add r5,r5,r4 + +2: mfmsr r7 li r8,(MSR_IS | MSR_DS) andc r7,r7,r8 @@ -1234,9 +1288,19 @@ _GLOBAL(restore_to_as0) mtspr SPRN_MAS1,r9 tlbwe isync + + cmpwi r4,0 + bne 3f mtlr r0 blr + /* + * The PAGE_OFFSET will map to a different physical address, + * jump to _start to do another relocation again. + */ +3: mr r3,r5 + bl _start + /* * We put a few things here that have to be page-aligned. 
This stuff * goes at the beginning of the data segment, which is page-aligned. diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index ca956c83e3a2..95deb9fdf92f 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -231,7 +231,7 @@ void __init adjust_total_lowmem(void) i = switch_to_as1(); __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); - restore_to_as0(i); + restore_to_as0(i, 0, 0); pr_info("Memory CAM mapping: "); for (i = 0; i < tlbcam_index - 1; i++) @@ -252,17 +252,25 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, } #ifdef CONFIG_RELOCATABLE -notrace void __init relocate_init(phys_addr_t start) +int __initdata is_second_reloc; +notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) { unsigned long base = KERNELBASE; + kernstart_addr = start; + if (is_second_reloc) { + virt_phys_offset = PAGE_OFFSET - memstart_addr; + return; + } + /* * Relocatable kernel support based on processing of dynamic - * relocation entries. - * Compute the virt_phys_offset : + * relocation entries. Before we get the real memstart_addr, + * We will compute the virt_phys_offset like this: * virt_phys_offset = stext.run - kernstart_addr * - * stext.run = (KERNELBASE & ~0x3ffffff) + (kernstart_addr & 0x3ffffff) + * stext.run = (KERNELBASE & ~0x3ffffff) + + * (kernstart_addr & 0x3ffffff) * When we relocate, we have : * * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) @@ -272,10 +280,32 @@ notrace void __init relocate_init(phys_addr_t start) * (kernstart_addr & ~0x3ffffff) * */ - kernstart_addr = start; start &= ~0x3ffffff; base &= ~0x3ffffff; virt_phys_offset = base - start; + early_get_first_memblock_info(__va(dt_ptr), NULL); + /* + * We now get the memstart_addr, then we should check if this + * address is the same as what the PAGE_OFFSET map to now. If + * not we have to change the map of PAGE_OFFSET to memstart_addr + * and do a second relocation. + */ + if (start != memstart_addr) { + int n; + long offset = start - memstart_addr; + + is_second_reloc = 1; + n = switch_to_as1(); + /* map a 64M area for the second relocation */ + if (memstart_addr > start) + map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM); + else + map_mem_in_cams_addr(start, PAGE_OFFSET + offset, + 0x4000000, CONFIG_LOWMEM_CAM_NUM); + restore_to_as0(n, offset, __va(dt_ptr)); + /* We should never reach here */ + panic("Relocation error"); + } } #endif #endif diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index eefbf7bb4331..91da910210cb 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -149,7 +149,7 @@ extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(unsigned long top); extern void adjust_total_lowmem(void); extern int switch_to_as1(void); -extern void restore_to_as0(int esel); +extern void restore_to_as0(int esel, int offset, void *dt_ptr); #endif extern void loadcam_entry(unsigned int index); -- cgit v1.2.3 From 0be7d969b0efef085ed6497d462ba16a875ca737 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Tue, 24 Dec 2013 15:12:11 +0800 Subject: powerpc/fsl_booke: smp support for booting a relocatable kernel above 64M When booting above the 64M for a secondary cpu, we also face the same issue as the boot cpu that the PAGE_OFFSET map two different physical address for the init tlb and the final map. So we have to use switch_to_as1/restore_to_as0 between the conversion of these two maps. When restoring to as0 for a secondary cpu, we only need to return to the caller. 
So add a new parameter for function restore_to_as0 for this purpose. Use LOAD_REG_ADDR_PIC to get the address of variables which may be used before we set the final map in cams for the secondary cpu. Move the setting of cams a bit earlier in order to avoid the unnecessary using of LOAD_REG_ADDR_PIC. Signed-off-by: Kevin Hao Signed-off-by: Scott Wood --- arch/powerpc/kernel/head_fsl_booke.S | 41 ++++++++++++++++++++++++------------ arch/powerpc/mm/fsl_booke_mmu.c | 4 ++-- arch/powerpc/mm/mmu_decl.h | 2 +- arch/powerpc/mm/tlb_nohash_low.S | 4 +++- 4 files changed, 34 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index b1f7edc3c360..b497188a94a1 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -216,8 +216,7 @@ set_ivor: /* Check to see if we're the second processor, and jump * to the secondary_start code if so */ - lis r24, boot_cpuid@h - ori r24, r24, boot_cpuid@l + LOAD_REG_ADDR_PIC(r24, boot_cpuid) lwz r24, 0(r24) cmpwi r24, -1 mfspr r24,SPRN_PIR @@ -1146,24 +1145,36 @@ _GLOBAL(__flush_disable_L1) /* When we get here, r24 needs to hold the CPU # */ .globl __secondary_start __secondary_start: - lis r3,__secondary_hold_acknowledge@h - ori r3,r3,__secondary_hold_acknowledge@l - stw r24,0(r3) - - li r3,0 - mr r4,r24 /* Why? */ - bl call_setup_cpu - - lis r3,tlbcam_index@ha - lwz r3,tlbcam_index@l(r3) + LOAD_REG_ADDR_PIC(r3, tlbcam_index) + lwz r3,0(r3) mtctr r3 li r26,0 /* r26 safe? */ + bl switch_to_as1 + mr r27,r3 /* tlb entry */ /* Load each CAM entry */ 1: mr r3,r26 bl loadcam_entry addi r26,r26,1 bdnz 1b + mr r3,r27 /* tlb entry */ + LOAD_REG_ADDR_PIC(r4, memstart_addr) + lwz r4,0(r4) + mr r5,r25 /* phys kernel start */ + rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */ + subf r4,r5,r4 /* memstart_addr - phys kernel start */ + li r5,0 /* no device tree */ + li r6,0 /* not boot cpu */ + bl restore_to_as0 + + + lis r3,__secondary_hold_acknowledge@h + ori r3,r3,__secondary_hold_acknowledge@l + stw r24,0(r3) + + li r3,0 + mr r4,r24 /* Why? */ + bl call_setup_cpu /* get current_thread_info and current */ lis r1,secondary_ti@ha @@ -1253,6 +1264,7 @@ _GLOBAL(switch_to_as1) * r3 - the tlb entry which should be invalidated * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) * r5 - device tree virtual address. If r4 is 0, r5 is ignored. 
+ * r6 - boot cpu */ _GLOBAL(restore_to_as0) mflr r0 @@ -1268,6 +1280,7 @@ _GLOBAL(restore_to_as0) */ add r9,r9,r4 add r5,r5,r4 + add r0,r0,r4 2: mfmsr r7 li r8,(MSR_IS | MSR_DS) @@ -1290,7 +1303,9 @@ _GLOBAL(restore_to_as0) isync cmpwi r4,0 - bne 3f + cmpwi cr1,r6,0 + cror eq,4*cr1+eq,eq + bne 3f /* offset != 0 && is_boot_cpu */ mtlr r0 blr diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index 95deb9fdf92f..a68671c18ad4 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -231,7 +231,7 @@ void __init adjust_total_lowmem(void) i = switch_to_as1(); __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); - restore_to_as0(i, 0, 0); + restore_to_as0(i, 0, 0, 1); pr_info("Memory CAM mapping: "); for (i = 0; i < tlbcam_index - 1; i++) @@ -302,7 +302,7 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) else map_mem_in_cams_addr(start, PAGE_OFFSET + offset, 0x4000000, CONFIG_LOWMEM_CAM_NUM); - restore_to_as0(n, offset, __va(dt_ptr)); + restore_to_as0(n, offset, __va(dt_ptr), 1); /* We should never reach here */ panic("Relocation error"); } diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 91da910210cb..9615d82919b8 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -149,7 +149,7 @@ extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(unsigned long top); extern void adjust_total_lowmem(void); extern int switch_to_as1(void); -extern void restore_to_as0(int esel, int offset, void *dt_ptr); +extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu); #endif extern void loadcam_entry(unsigned int index); diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S index 626ad081639f..43ff3c797fbf 100644 --- a/arch/powerpc/mm/tlb_nohash_low.S +++ b/arch/powerpc/mm/tlb_nohash_low.S @@ -402,7 +402,9 @@ _GLOBAL(set_context) * Load TLBCAM[index] entry in to the L2 CAM MMU */ _GLOBAL(loadcam_entry) - LOAD_REG_ADDR(r4, TLBCAM) + mflr r5 + LOAD_REG_ADDR_PIC(r4, TLBCAM) + mtlr r5 mulli r5,r3,TLBCAM_SIZE add r3,r5,r4 lwz r4,TLBCAM_MAS0(r3) -- cgit v1.2.3 From 47ce8af4209f4344f152aa6fc538efe9d6bdfd1a Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 11 Oct 2013 19:22:37 -0500 Subject: powerpc: add barrier after writing kernel PTE There is no barrier between something like ioremap() writing to a PTE, and returning the value to a caller that may then store the pointer in a place that is visible to other CPUs. Such callers generally don't perform barriers of their own. Even if callers of ioremap() and similar things did use barriers, the most logical choise would be smp_wmb(), which is not architecturally sufficient when BookE hardware tablewalk is used. A full sync is specified by the architecture. For userspace mappings, OTOH, we generally already have an lwsync due to locking, and if we occasionally take a spurious fault due to not having a full sync with hardware tablewalk, it will not be fatal because we will retry rather than oops. 
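The window being closed looks roughly like this (a kernel-style schematic only -- the device, helpers, and call sites are hypothetical, not the kernel's real code paths):

	#include <linux/io.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static struct {
		u32 __iomem *regs;
	} global_dev;

	/* CPU A: creates the mapping and publishes the pointer. */
	static int mydev_probe(phys_addr_t phys, unsigned long size)
	{
		void __iomem *regs = ioremap(phys, size);	/* writes a kernel PTE */

		if (!regs)
			return -ENOMEM;
		/*
		 * Without a sync between the PTE store inside ioremap() and
		 * the store below, a hardware tablewalk on another CPU may not
		 * yet see the new PTE when the pointer becomes visible.
		 */
		global_dev.regs = regs;
		return 0;
	}

	/* CPU B: consumes the pointer; a miss on a kernel mapping here is
	 * fatal rather than retried, hence the full mb() added below. */
	static u32 mydev_read_status(void)
	{
		return in_be32(global_dev.regs);
	}
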
Signed-off-by: Scott Wood --- arch/powerpc/mm/pgtable_32.c | 1 + arch/powerpc/mm/pgtable_64.c | 12 ++++++++++++ 2 files changed, 13 insertions(+) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 5b9601715289..343a87fa78b5 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -299,6 +299,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags) set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags))); } + smp_wmb(); return err; } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 02e8681fb865..755138218e04 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -153,6 +153,18 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags) } #endif /* !CONFIG_PPC_MMU_NOHASH */ } + +#ifdef CONFIG_PPC_BOOK3E_64 + /* + * With hardware tablewalk, a sync is needed to ensure that + * subsequent accesses see the PTE we just wrote. Unlike userspace + * mappings, we can't tolerate spurious faults, so make sure + * the new PTE will be seen the first time. + */ + mb(); +#else + smp_wmb(); +#endif return 0; } -- cgit v1.2.3 From 28efc35fe68dacbddc4b12c2fa8f2df1593a4ad3 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 11 Oct 2013 19:22:38 -0500 Subject: powerpc/e6500: TLB miss handler with hardware tablewalk support There are a few things that make the existing hw tablewalk handlers unsuitable for e6500: - Indirect entries go in TLB1 (though the resulting direct entries go in TLB0). - It has threads, but no "tlbsrx." -- so we need a spinlock and a normal "tlbsx". Because we need this lock, hardware tablewalk is mandatory on e6500 unless we want to add spinlock+tlbsx to the normal bolted TLB miss handler. - TLB1 has no HES (nor next-victim hint) so we need software round robin (TODO: integrate this round robin data with hugetlb/KVM) - The existing tablewalk handlers map half of a page table at a time, because IBM hardware has a fixed 1MiB indirect page size. e6500 has variable size indirect entries, with a minimum of 2MiB. So we can't do the half-page indirect mapping, and even if we could it would be less efficient than mapping the full page. - Like on e5500, the linear mapping is bolted, so we don't need the overhead of supporting nested tlb misses. Note that hardware tablewalk does not work in rev1 of e6500. We do not expect to support e6500 rev1 in mainline Linux. 
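The software round-robin that stands in for the missing hardware hint amounts to a few lines of C over the new tlb_core_data fields (a sketch of the policy only -- the miss handler does this in assembly, and a C variant for hugetlb appears in a later patch):

	/* Mirrors the esel_* fields added to struct tlb_core_data. */
	struct tlb_core_data_model {
		unsigned char esel_next;	/* next TLB1 entry to hand out */
		unsigned char esel_max;		/* one past the last usable entry */
		unsigned char esel_first;	/* first entry above the bolted CAMs */
	};

	static int tlb1_pick_victim(struct tlb_core_data_model *tcd)
	{
		int esel = tcd->esel_next;

		/* Walk [esel_first, esel_max) and wrap, so the bolted
		 * linear-mapping entries below esel_first are never evicted. */
		if (++tcd->esel_next >= tcd->esel_max)
			tcd->esel_next = tcd->esel_first;

		return esel;
	}

Because e6500 threads share TLB1 and there is no tlbsrx., the real handler brackets this selection with the per-core lock byte (TCD_LOCK) also added here.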
Signed-off-by: Scott Wood Cc: Mihai Caraman --- arch/powerpc/include/asm/mmu-book3e.h | 13 +++ arch/powerpc/include/asm/mmu.h | 21 +++-- arch/powerpc/include/asm/paca.h | 6 ++ arch/powerpc/kernel/asm-offsets.c | 9 ++ arch/powerpc/kernel/paca.c | 5 + arch/powerpc/kernel/setup_64.c | 31 ++++++ arch/powerpc/mm/fsl_booke_mmu.c | 7 ++ arch/powerpc/mm/mem.c | 6 ++ arch/powerpc/mm/tlb_low_64e.S | 171 ++++++++++++++++++++++++++++++++++ arch/powerpc/mm/tlb_nohash.c | 93 ++++++++++++------ 10 files changed, 326 insertions(+), 36 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 936db360790a..89b785d16846 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -286,8 +286,21 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) extern int mmu_linear_psize; extern int mmu_vmemmap_psize; +struct tlb_core_data { + /* For software way selection, as on Freescale TLB1 */ + u8 esel_next, esel_max, esel_first; + + /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */ + u8 lock; +}; + #ifdef CONFIG_PPC64 extern unsigned long linear_map_top; +extern int book3e_htw_mode; + +#define PPC_HTW_NONE 0 +#define PPC_HTW_IBM 1 +#define PPC_HTW_E6500 2 /* * 64-bit booke platforms don't load the tlb in the tlb miss handler code. diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 691fd8aca939..f8d1d6dcf7db 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -180,16 +180,17 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr) #define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */ #define MMU_PAGE_256K 4 #define MMU_PAGE_1M 5 -#define MMU_PAGE_4M 6 -#define MMU_PAGE_8M 7 -#define MMU_PAGE_16M 8 -#define MMU_PAGE_64M 9 -#define MMU_PAGE_256M 10 -#define MMU_PAGE_1G 11 -#define MMU_PAGE_16G 12 -#define MMU_PAGE_64G 13 - -#define MMU_PAGE_COUNT 14 +#define MMU_PAGE_2M 6 +#define MMU_PAGE_4M 7 +#define MMU_PAGE_8M 8 +#define MMU_PAGE_16M 9 +#define MMU_PAGE_64M 10 +#define MMU_PAGE_256M 11 +#define MMU_PAGE_1G 12 +#define MMU_PAGE_16G 13 +#define MMU_PAGE_64G 14 + +#define MMU_PAGE_COUNT 15 #if defined(CONFIG_PPC_STD_MMU_64) /* 64-bit classic hash table MMU */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index c3523d1dda58..e81731c62a7f 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -113,6 +113,10 @@ struct paca_struct { /* Keep pgd in the same cacheline as the start of extlb */ pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */ pgd_t *kernel_pgd; /* Kernel PGD */ + + /* Shared by all threads of a core -- points to tcd of first thread */ + struct tlb_core_data *tcd_ptr; + /* We can have up to 3 levels of reentrancy in the TLB miss handler */ u64 extlb[3][EX_TLB_SIZE / sizeof(u64)]; u64 exmc[8]; /* used for machine checks */ @@ -123,6 +127,8 @@ struct paca_struct { void *mc_kstack; void *crit_kstack; void *dbg_kstack; + + struct tlb_core_data tcd; #endif /* CONFIG_PPC_BOOK3E */ mm_context_t context; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 41a283956a29..ed8d68ce71f3 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -203,6 +203,15 @@ int main(void) DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); DEFINE(PACA_DBG_STACK, 
offsetof(struct paca_struct, dbg_kstack)); + DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr)); + + DEFINE(TCD_ESEL_NEXT, + offsetof(struct tlb_core_data, esel_next)); + DEFINE(TCD_ESEL_MAX, + offsetof(struct tlb_core_data, esel_max)); + DEFINE(TCD_ESEL_FIRST, + offsetof(struct tlb_core_data, esel_first)); + DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock)); #endif /* CONFIG_PPC_BOOK3E */ #ifdef CONFIG_PPC_STD_MMU_64 diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 623c356fe34f..bf0aada02fe4 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -160,6 +160,11 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) #ifdef CONFIG_PPC_STD_MMU_64 new_paca->slb_shadow_ptr = init_slb_shadow(cpu); #endif /* CONFIG_PPC_STD_MMU_64 */ + +#ifdef CONFIG_PPC_BOOK3E + /* For now -- if we have threads this will be adjusted later */ + new_paca->tcd_ptr = &new_paca->tcd; +#endif } /* Put the paca pointer into r13 and SPRG_PACA */ diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 2232aff66059..1ce9b87d7df8 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -97,6 +97,36 @@ int dcache_bsize; int icache_bsize; int ucache_bsize; +#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) +static void setup_tlb_core_data(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + int first = cpu_first_thread_sibling(cpu); + + paca[cpu].tcd_ptr = &paca[first].tcd; + + /* + * If we have threads, we need either tlbsrx. + * or e6500 tablewalk mode, or else TLB handlers + * will be racy and could produce duplicate entries. + */ + if (smt_enabled_at_boot >= 2 && + !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && + book3e_htw_mode != PPC_HTW_E6500) { + /* Should we panic instead? */ + WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n", + __func__); + } + } +} +#else +static void setup_tlb_core_data(void) +{ +} +#endif + #ifdef CONFIG_SMP static char *smt_enabled_cmdline; @@ -445,6 +475,7 @@ void __init setup_system(void) smp_setup_cpu_maps(); check_smt_enabled(); + setup_tlb_core_data(); #ifdef CONFIG_SMP /* Release secondary cpus out of their spinloops at 0x60 now that diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index a68671c18ad4..94cd728166d3 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -52,6 +52,7 @@ #include #include #include +#include #include "mmu_decl.h" @@ -191,6 +192,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, } tlbcam_index = i; +#ifdef CONFIG_PPC64 + get_paca()->tcd.esel_next = i; + get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + get_paca()->tcd.esel_first = i; +#endif + return amount_mapped; } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 3fa93dc7fe75..94448cd4b444 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -307,6 +307,12 @@ static void __init register_page_bootmem_info(void) void __init mem_init(void) { + /* + * book3s is limited to 16 page sizes due to encoding this in + * a 4-bit field for slices. 
+ */ + BUILD_BUG_ON(MMU_PAGE_COUNT > 16); + #ifdef CONFIG_SWIOTLB swiotlb_init(0); #endif diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index b4113bf86353..75f5d2777f61 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -239,6 +239,177 @@ itlb_miss_fault_bolted: beq tlb_miss_common_bolted b itlb_miss_kernel_bolted +/* + * TLB miss handling for e6500 and derivatives, using hardware tablewalk. + * + * Linear mapping is bolted: no virtual page table or nested TLB misses + * Indirect entries in TLB1, hardware loads resulting direct entries + * into TLB0 + * No HES or NV hint on TLB1, so we need to do software round-robin + * No tlbsrx. so we need a spinlock, and we have to deal + * with MAS-damage caused by tlbsx + * 4K pages only + */ + + START_EXCEPTION(instruction_tlb_miss_e6500) + tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0 + + ld r11,PACA_TCD_PTR(r13) + srdi. r15,r16,60 /* get region */ + ori r16,r16,1 + + TLB_MISS_STATS_SAVE_INFO_BOLTED + bne tlb_miss_kernel_e6500 /* user/kernel test */ + + b tlb_miss_common_e6500 + + START_EXCEPTION(data_tlb_miss_e6500) + tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR + + ld r11,PACA_TCD_PTR(r13) + srdi. r15,r16,60 /* get region */ + rldicr r16,r16,0,62 + + TLB_MISS_STATS_SAVE_INFO_BOLTED + bne tlb_miss_kernel_e6500 /* user vs kernel check */ + +/* + * This is the guts of the TLB miss handler for e6500 and derivatives. + * We are entered with: + * + * r16 = page of faulting address (low bit 0 if data, 1 if instruction) + * r15 = crap (free to use) + * r14 = page table base + * r13 = PACA + * r11 = tlb_per_core ptr + * r10 = crap (free to use) + */ +tlb_miss_common_e6500: + /* + * Search if we already have an indirect entry for that virtual + * address, and if we do, bail out. + * + * MAS6:IND should be already set based on MAS4 + */ + addi r10,r11,TCD_LOCK +1: lbarx r15,0,r10 + cmpdi r15,0 + bne 2f + li r15,1 + stbcx. r15,0,r10 + bne 1b + .subsection 1 +2: lbz r15,0(r10) + cmpdi r15,0 + bne 2b + b 1b + .previous + + mfspr r15,SPRN_MAS2 + + tlbsx 0,r16 + mfspr r10,SPRN_MAS1 + andis. r10,r10,MAS1_VALID@h + bne tlb_miss_done_e6500 + + /* Undo MAS-damage from the tlbsx */ + mfspr r10,SPRN_MAS1 + oris r10,r10,MAS1_VALID@h + mtspr SPRN_MAS1,r10 + mtspr SPRN_MAS2,r15 + + /* Now, we need to walk the page tables. First check if we are in + * range. + */ + rldicl. 
r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 + bne- tlb_miss_fault_e6500 + + rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3 + cmpldi cr0,r14,0 + clrrdi r15,r15,3 + beq- tlb_miss_fault_e6500 /* No PGDIR, bail */ + ldx r14,r14,r15 /* grab pgd entry */ + + rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3 + clrrdi r15,r15,3 + cmpdi cr0,r14,0 + bge tlb_miss_fault_e6500 /* Bad pgd entry or hugepage; bail */ + ldx r14,r14,r15 /* grab pud entry */ + + rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3 + clrrdi r15,r15,3 + cmpdi cr0,r14,0 + bge tlb_miss_fault_e6500 + ldx r14,r14,r15 /* Grab pmd entry */ + + mfspr r10,SPRN_MAS0 + cmpdi cr0,r14,0 + bge tlb_miss_fault_e6500 + + /* Now we build the MAS for a 2M indirect page: + * + * MAS 0 : ESEL needs to be filled by software round-robin + * MAS 1 : Fully set up + * - PID already updated by caller if necessary + * - TSIZE for now is base ind page size always + * - TID already cleared if necessary + * MAS 2 : Default not 2M-aligned, need to be redone + * MAS 3+7 : Needs to be done + */ + + ori r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT) + mtspr SPRN_MAS7_MAS3,r14 + + clrrdi r15,r16,21 /* make EA 2M-aligned */ + mtspr SPRN_MAS2,r15 + + lbz r15,TCD_ESEL_NEXT(r11) + lbz r16,TCD_ESEL_MAX(r11) + lbz r14,TCD_ESEL_FIRST(r11) + rlwimi r10,r15,16,0x00ff0000 /* insert esel_next into MAS0 */ + addi r15,r15,1 /* increment esel_next */ + mtspr SPRN_MAS0,r10 + cmpw r15,r16 + iseleq r15,r14,r15 /* if next == last use first */ + stb r15,TCD_ESEL_NEXT(r11) + + tlbwe + +tlb_miss_done_e6500: + .macro tlb_unlock_e6500 + li r15,0 + isync + stb r15,TCD_LOCK(r11) + .endm + + tlb_unlock_e6500 + TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK) + tlb_epilog_bolted + rfi + +tlb_miss_kernel_e6500: + mfspr r10,SPRN_MAS1 + ld r14,PACA_KERNELPGD(r13) + cmpldi cr0,r15,8 /* Check for vmalloc region */ + rlwinm r10,r10,0,16,1 /* Clear TID */ + mtspr SPRN_MAS1,r10 + beq+ tlb_miss_common_e6500 + +tlb_miss_fault_e6500: + tlb_unlock_e6500 + /* We need to check if it was an instruction miss */ + andi. r16,r16,1 + bne itlb_miss_fault_e6500 +dtlb_miss_fault_e6500: + TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT) + tlb_epilog_bolted + b exc_data_storage_book3e +itlb_miss_fault_e6500: + TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT) + tlb_epilog_bolted + b exc_instruction_storage_book3e + + /********************************************************************** * * * TLB miss handling for Book3E with TLB reservation and HES support * diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 8805b7b87dc6..735839b74dc5 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "mmu_decl.h" @@ -58,6 +59,10 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { .shift = 12, .enc = BOOK3E_PAGESZ_4K, }, + [MMU_PAGE_2M] = { + .shift = 21, + .enc = BOOK3E_PAGESZ_2M, + }, [MMU_PAGE_4M] = { .shift = 22, .enc = BOOK3E_PAGESZ_4M, @@ -136,7 +141,7 @@ static inline int mmu_get_tsize(int psize) int mmu_linear_psize; /* Page size used for the linear mapping */ int mmu_pte_psize; /* Page size used for PTE pages */ int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ -int book3e_htw_enabled; /* Is HW tablewalk enabled ? */ +int book3e_htw_mode; /* HW tablewalk? 
Value is PPC_HTW_* */ unsigned long linear_map_top; /* Top of linear mapping */ #endif /* CONFIG_PPC64 */ @@ -377,7 +382,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) { int tsize = mmu_psize_defs[mmu_pte_psize].enc; - if (book3e_htw_enabled) { + if (book3e_htw_mode != PPC_HTW_NONE) { unsigned long start = address & PMD_MASK; unsigned long end = address + PMD_SIZE; unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; @@ -430,7 +435,7 @@ static void setup_page_sizes(void) def = &mmu_psize_defs[psize]; shift = def->shift; - if (shift == 0) + if (shift == 0 || shift & 1) continue; /* adjust to be in terms of 4^shift Kb */ @@ -440,21 +445,40 @@ static void setup_page_sizes(void) def->flags |= MMU_PAGE_SIZE_DIRECT; } - goto no_indirect; + goto out; } if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { - u32 tlb1ps = mfspr(SPRN_TLB1PS); + u32 tlb1cfg, tlb1ps; + + tlb0cfg = mfspr(SPRN_TLB0CFG); + tlb1cfg = mfspr(SPRN_TLB1CFG); + tlb1ps = mfspr(SPRN_TLB1PS); + eptcfg = mfspr(SPRN_EPTCFG); + + if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) + book3e_htw_mode = PPC_HTW_E6500; + + /* + * We expect 4K subpage size and unrestricted indirect size. + * The lack of a restriction on indirect size is a Freescale + * extension, indicated by PSn = 0 but SPSn != 0. + */ + if (eptcfg != 2) + book3e_htw_mode = PPC_HTW_NONE; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; if (tlb1ps & (1U << (def->shift - 10))) { def->flags |= MMU_PAGE_SIZE_DIRECT; + + if (book3e_htw_mode && psize == MMU_PAGE_2M) + def->flags |= MMU_PAGE_SIZE_INDIRECT; } } - goto no_indirect; + goto out; } #endif @@ -471,8 +495,11 @@ static void setup_page_sizes(void) } /* Indirect page sizes supported ? */ - if ((tlb0cfg & TLBnCFG_IND) == 0) - goto no_indirect; + if ((tlb0cfg & TLBnCFG_IND) == 0 || + (tlb0cfg & TLBnCFG_PT) == 0) + goto out; + + book3e_htw_mode = PPC_HTW_IBM; /* Now, we only deal with one IND page size for each * direct size. Hopefully all implementations today are @@ -497,8 +524,8 @@ static void setup_page_sizes(void) def->ind = ps + 10; } } - no_indirect: +out: /* Cleanup array and print summary */ pr_info("MMU: Supported page sizes\n"); for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { @@ -520,23 +547,23 @@ static void setup_page_sizes(void) static void setup_mmu_htw(void) { - /* Check if HW tablewalk is present, and if yes, enable it by: - * - * - patching the TLB miss handlers to branch to the - * one dedicates to it - * - * - setting the global book3e_htw_enabled - */ - unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG); + /* + * If we want to use HW tablewalk, enable it by patching the TLB miss + * handlers to branch to the one dedicated to it. + */ - if ((tlb0cfg & TLBnCFG_IND) && - (tlb0cfg & TLBnCFG_PT)) { + switch (book3e_htw_mode) { + case PPC_HTW_IBM: patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); - book3e_htw_enabled = 1; + break; + case PPC_HTW_E6500: + patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); + patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e); + break; } pr_info("MMU: Book3E HW tablewalk %s\n", - book3e_htw_enabled ? "enabled" : "not supported"); + book3e_htw_mode != PPC_HTW_NONE ? 
"enabled" : "not supported"); } /* @@ -576,8 +603,16 @@ static void __early_init_mmu(int boot_cpu) /* Set MAS4 based on page table setting */ mas4 = 0x4 << MAS4_WIMGED_SHIFT; - if (book3e_htw_enabled) { - mas4 |= mas4 | MAS4_INDD; + switch (book3e_htw_mode) { + case PPC_HTW_E6500: + mas4 |= MAS4_INDD; + mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT; + mas4 |= MAS4_TLBSELD(1); + mmu_pte_psize = MMU_PAGE_2M; + break; + + case PPC_HTW_IBM: + mas4 |= MAS4_INDD; #ifdef CONFIG_PPC_64K_PAGES mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT; mmu_pte_psize = MMU_PAGE_256M; @@ -585,13 +620,16 @@ static void __early_init_mmu(int boot_cpu) mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT; mmu_pte_psize = MMU_PAGE_1M; #endif - } else { + break; + + case PPC_HTW_NONE: #ifdef CONFIG_PPC_64K_PAGES mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT; #else mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT; #endif mmu_pte_psize = mmu_virtual_psize; + break; } mtspr(SPRN_MAS4, mas4); @@ -611,8 +649,11 @@ static void __early_init_mmu(int boot_cpu) /* limit memory so we dont have linear faults */ memblock_enforce_memory_limit(linear_map_top); - patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); - patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e); + if (book3e_htw_mode == PPC_HTW_NONE) { + patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e); + patch_exception(0x1e0, + exc_instruction_tlb_miss_bolted_book3e); + } } #endif -- cgit v1.2.3 From bbead78c06e0ddda3cc53f81ecdaa2062a56247a Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 11 Oct 2013 19:22:39 -0500 Subject: powerpc/fsl-book3e-64: Use paca for hugetlb TLB1 entry selection This keeps usage coordinated for hugetlb and indirect entries, which should make entry selection more predictable and probably improve overall performance when mixing the two. 
Signed-off-by: Scott Wood --- arch/powerpc/mm/hugetlbpage-book3e.c | 51 +++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index 646c4bffaeba..5e4ee2573903 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c @@ -8,6 +8,44 @@ #include #include +#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC64 +static inline int tlb1_next(void) +{ + struct paca_struct *paca = get_paca(); + struct tlb_core_data *tcd; + int this, next; + + tcd = paca->tcd_ptr; + this = tcd->esel_next; + + next = this + 1; + if (next >= tcd->esel_max) + next = tcd->esel_first; + + tcd->esel_next = next; + return this; +} +#else +static inline int tlb1_next(void) +{ + int index, ncams; + + ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + + index = __get_cpu_var(next_tlbcam_idx); + + /* Just round-robin the entries and wrap when we hit the end */ + if (unlikely(index == ncams - 1)) + __get_cpu_var(next_tlbcam_idx) = tlbcam_index; + else + __get_cpu_var(next_tlbcam_idx)++; + + return index; +} +#endif /* !PPC64 */ +#endif /* FSL */ + static inline int mmu_get_tsize(int psize) { return mmu_psize_defs[psize].enc; @@ -47,7 +85,7 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, struct mm_struct *mm; #ifdef CONFIG_PPC_FSL_BOOK3E - int index, ncams; + int index; #endif if (unlikely(is_kernel_addr(ea))) @@ -77,18 +115,11 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, } #ifdef CONFIG_PPC_FSL_BOOK3E - ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; - /* We have to use the CAM(TLB1) on FSL parts for hugepages */ - index = __get_cpu_var(next_tlbcam_idx); + index = tlb1_next(); mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1)); - - /* Just round-robin the entries and wrap when we hit the end */ - if (unlikely(index == ncams - 1)) - __get_cpu_var(next_tlbcam_idx) = tlbcam_index; - else - __get_cpu_var(next_tlbcam_idx)++; #endif + mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize); mas2 = ea & ~((1UL << shift) - 1); mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; -- cgit v1.2.3 From 1149e8a73ffea953d8d6615ee37bce820a3eaeb8 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Tue, 12 Jun 2012 17:02:32 -0500 Subject: powerpc/booke-64: fix tlbsrx. path in bolted tlb handler It was branching to the cleanup part of the non-bolted handler, which would have been bad if there were any chips with tlbsrx. that use the bolted handler. Signed-off-by: Scott Wood --- arch/powerpc/mm/tlb_low_64e.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index 75f5d2777f61..16250b162375 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -136,7 +136,7 @@ BEGIN_MMU_FTR_SECTION */ PPC_TLBSRX_DOT(0,R16) ldx r14,r14,r15 /* grab pgd entry */ - beq normal_tlb_miss_done /* tlb exists already, bail */ + beq tlb_miss_done_bolted /* tlb exists already, bail */ MMU_FTR_SECTION_ELSE ldx r14,r14,r15 /* grab pgd entry */ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV) @@ -192,6 +192,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV) mtspr SPRN_MAS7_MAS3,r15 tlbwe +tlb_miss_done_bolted: TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK) tlb_epilog_bolted rfi -- cgit v1.2.3