author		Stefan Roese <sr@denx.de>			2007-11-20 21:19:16 +1100
committer	Josh Boyer <jwboyer@linux.vnet.ibm.com>		2007-12-23 13:31:16 -0600
commit		f4151b9ba87901eb3a7bc49f418cc352d4e1927e (patch)
tree		7481891935cea47d7794e583a6fdfe530575c17d /arch/ppc/mm
parent		4922566f0394ac41c72fb960f22b4f84333026bc (diff)
[POWERPC] 4xx: Fix TLB 0 problem with CONFIG_SERIAL_TEXT_DEBUG
Right now TLB entry 0 is used as the UART0 mapping for early debug
output (via CONFIG_SERIAL_TEXT_DEBUG). This causes problems when many
TLBs get used during Linux bootup (e.g. while scanning PCIe behind
bridges and/or switches on 440SPe platforms). The TLB 0 entry gets
overwritten and further debug output may crash/hang the system.
This patch moves the early debug UART0 TLB entry from 0 to 62, as done
in arch/powerpc. This way it sits in the "pinned" area and will not get
overwritten (the resulting slot layout is sketched below the sign-offs).
The arch/ppc/mm/44x_mmu.c code is also synced with the newer code from
arch/powerpc.
Signed-off-by: Stefan Roese <sr@denx.de>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
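
For reference, a minimal sketch of the slot arithmetic behind the new watermark.
It assumes PPC4XX_TLB_SIZE is 64 and PPC44x_EARLY_TLBS is 2 when
CONFIG_SERIAL_TEXT_DEBUG is enabled (one early entry for the kernel's initial
256MB mapping from head_44x.S, one for the UART0 mapping); neither constant is
defined in this diff, and placing the kernel mapping at slot 63 follows the
arch/powerpc layout, so all of these values are assumptions, not part of the patch.

#include <stdio.h>

/* Assumed values; the real definitions live outside this diff. */
#define PPC4XX_TLB_SIZE		64	/* UTLB entries on a 440 core */
#define PPC44x_EARLY_TLBS	2	/* kernel 256MB mapping + early debug UART */

int main(void)
{
	/* Watermark as computed by the patched 44x_mmu.c */
	unsigned int hwater = PPC4XX_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;

	/* Slots 0..hwater are recycled by the TLB miss handler; everything
	 * above the watermark (here 62 for UART0 and 63 for the kernel's
	 * initial mapping, per the assumptions above) is left alone,
	 * i.e. "pinned". */
	printf("tlb_44x_hwater = %u\n", hwater);
	printf("replaceable slots 0..%u, pinned slots %u..%u\n",
	       hwater, hwater + 1, PPC4XX_TLB_SIZE - 1);
	return 0;
}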
Diffstat (limited to 'arch/ppc/mm')
-rw-r--r-- | arch/ppc/mm/44x_mmu.c | 51
1 file changed, 16 insertions(+), 35 deletions(-)
diff --git a/arch/ppc/mm/44x_mmu.c b/arch/ppc/mm/44x_mmu.c
index 6536a25cfcb8..fbb577a0d165 100644
--- a/arch/ppc/mm/44x_mmu.c
+++ b/arch/ppc/mm/44x_mmu.c
@@ -60,38 +60,28 @@ extern char etext[], _stext[];
  * Just needed it declared someplace.
  */
 unsigned int tlb_44x_index = 0;
-unsigned int tlb_44x_hwater = 62;
+unsigned int tlb_44x_hwater = PPC4XX_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
 int icache_44x_need_flush;
 
 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
  */
-static void __init
-ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
+static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 {
-	unsigned long attrib = 0;
-
-	__asm__ __volatile__("\
-	clrrwi	%2,%2,10\n\
-	ori	%2,%2,%4\n\
-	clrrwi	%1,%1,10\n\
-	li	%0,0\n\
-	ori	%0,%0,%5\n\
-	tlbwe	%2,%3,%6\n\
-	tlbwe	%1,%3,%7\n\
-	tlbwe	%0,%3,%8"
+	__asm__ __volatile__(
+		"tlbwe	%2,%3,%4\n"
+		"tlbwe	%1,%3,%5\n"
+		"tlbwe	%0,%3,%6\n"
 	:
-	: "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
-	  "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
-	  "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	  "r" (phys),
+	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
+	  "r" (tlb_44x_hwater--),	/* slot for this TLB entry */
 	  "i" (PPC44x_TLB_PAGEID),
 	  "i" (PPC44x_TLB_XLAT),
 	  "i" (PPC44x_TLB_ATTRIB));
 }
 
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
 void __init MMU_init_hw(void)
 {
 	flush_instruction_cache();
@@ -99,22 +89,13 @@ void __init MMU_init_hw(void)
 
 unsigned long __init mmu_mapin_ram(void)
 {
-	unsigned int pinned_tlbs = 1;
-	int i;
-
-	/* Determine number of entries necessary to cover lowmem */
-	pinned_tlbs = (unsigned int)
-		(_ALIGN(total_lowmem, PPC_PIN_SIZE) >> PPC44x_PIN_SHIFT);
-
-	/* Write upper watermark to save location */
-	tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
+	unsigned long addr;
 
-	/* If necessary, set additional pinned TLBs */
-	if (pinned_tlbs > 1)
-		for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
-			unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC_PIN_SIZE;
-			ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
-		}
+	/* Pin in enough TLBs to cover any lowmem not covered by the
+	 * initial 256M mapping established in head_44x.S */
+	for (addr = PPC_PIN_SIZE; addr < total_lowmem;
+	     addr += PPC_PIN_SIZE)
+		ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
 
 	return total_lowmem;
 }
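
To make the new mmu_mapin_ram() flow concrete, here is a hedged user-space
sketch of the same loop. pin_tlb() below only prints what the real
ppc44x_pin_tlb() would program via tlbwe, and the 512MB lowmem size, the 256MB
PPC_PIN_SIZE and the starting watermark of 61 are illustrative assumptions
rather than values taken from this patch.

#include <stdio.h>

#define PPC_PIN_SIZE	(256u << 20)	/* assumed: one pinned entry covers 256MB */
#define PAGE_OFFSET	0xc0000000u	/* assumed kernel virtual base */

static unsigned int tlb_44x_hwater = 61;	/* assumed starting watermark */

/* Stand-in for ppc44x_pin_tlb(): the real function issues three tlbwe
 * instructions and decrements the watermark; here we only report the
 * slot and the 256MB mapping it would pin. */
static void pin_tlb(unsigned int virt, unsigned int phys)
{
	printf("slot %2u: virt 0x%08x -> phys 0x%08x (pinned)\n",
	       tlb_44x_hwater--, virt, phys);
}

int main(void)
{
	unsigned long total_lowmem = 512u << 20;	/* hypothetical 512MB lowmem */
	unsigned long addr;

	/* Same shape as the patched mmu_mapin_ram(): the first 256MB is
	 * already mapped by the pinned head_44x.S entry, so start one pin
	 * size in and cover the remainder of lowmem. */
	for (addr = PPC_PIN_SIZE; addr < total_lowmem; addr += PPC_PIN_SIZE)
		pin_tlb((unsigned int)(addr + PAGE_OFFSET), (unsigned int)addr);

	return 0;
}

With 512MB of lowmem this pins exactly one extra entry, slot 61 mapping
0xd0000000 to physical 0x10000000; each further 256MB of lowmem would take the
next slot down, keeping all pinned entries above the miss handler's
replacement range.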