Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r-- | arch/mips/mm/tlbex.c | 33
1 file changed, 17 insertions, 16 deletions
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 1c8ac49ec72c..820e6612d744 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -5,8 +5,8 @@
  *
  * Synthesize TLB refill handlers at runtime.
  *
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
  * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  * Copyright (C) 2011 MIPS Technologies, Inc.
@@ -212,7 +212,7 @@ static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
 /*
  * pgtable bits are assigned dynamically depending on processor feature
  * and statically based on kernel configuration. This spits out the actual
- * values the kernel is using. Required to make sense from disassembled
+ * values the kernel is using. Required to make sense from disassembled
  * TLB exception handlers.
  */
 static void output_pgtable_bits_defines(void)
@@ -464,8 +464,8 @@ static u32 final_handler[64] __cpuinitdata;
  * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
  * 2. A timing hazard exists for the TLBP instruction.
  *
- *	stalling_instruction
- *	TLBP
+ *	stalling_instruction
+ *	TLBP
  *
  * The JTLB is being read for the TLBP throughout the stall generated by the
  * previous instruction. This is not really correct as the stalling instruction
@@ -476,7 +476,7 @@ static u32 final_handler[64] __cpuinitdata;
  * The software work-around is to not allow the instruction preceding the TLBP
  * to stall - make it an NOP or some other instruction guaranteed not to stall.
  *
- * Errata 2 will not be fixed. This errata is also on the R5000.
+ * Errata 2 will not be fixed. This errata is also on the R5000.
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
@@ -581,6 +581,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_4KC:
 	case CPU_4KEC:
 	case CPU_M14KC:
+	case CPU_M14KEC:
 	case CPU_SB1:
 	case CPU_SB1A:
 	case CPU_4KSC:
@@ -748,7 +749,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	 */
 	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
-	/* We can clobber tmp. It isn't used after this.*/
+	/* We can clobber tmp. It isn't used after this.*/
 	if (!small_sequence)
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
@@ -830,12 +831,12 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		/* Clear lower 23 bits of context. */
 		uasm_i_dins(p, ptr, 0, 0, 23);
-		/* 1 0 1 0 1 << 6 xkphys cached */
+		/* 1 0 1 0 1 << 6 xkphys cached */
 		uasm_i_ori(p, ptr, ptr, 0x540);
 		uasm_i_drotr(p, ptr, ptr, 11);
 	}
 #elif defined(CONFIG_SMP)
-# ifdef CONFIG_MIPS_MT_SMTC
+# ifdef CONFIG_MIPS_MT_SMTC
 	/*
 	 * SMTC uses TCBind value as "CPU" index
 	 */
@@ -955,7 +956,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_MIPS_MT_SMTC
 	/*
 	 * SMTC uses TCBind value as "CPU" index
 	 */
@@ -965,7 +966,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 #else
 	/*
 	 * smp_processor_id() << 3 is stored in CONTEXT.
-	 */
+	 */
 	uasm_i_mfc0(p, ptr, C0_CONTEXT);
 	UASM_i_LA_mostly(p, tmp, pgdc);
 	uasm_i_srl(p, ptr, ptr, 23);
@@ -1153,7 +1154,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 	if (pgd_reg == -1) {
 		vmalloc_branch_delay_filled = 1;
-		/* 1 0 1 0 1 << 6 xkphys cached */
+		/* 1 0 1 0 1 << 6 xkphys cached */
 		uasm_i_ori(p, ptr, ptr, 0x540);
 		uasm_i_drotr(p, ptr, ptr, 11);
 	}
@@ -1171,9 +1172,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 	uasm_l_vmalloc_done(l, *p);
 	/*
-	 * tmp			ptr
-	 * fall-through case =	badvaddr  *pgd_current
-	 * vmalloc case      =	badvaddr  swapper_pg_dir
+	 * tmp			ptr
+	 * fall-through case =	badvaddr  *pgd_current
+	 * vmalloc case      =	badvaddr  swapper_pg_dir
 	 */
 	if (vmalloc_branch_delay_filled)
@@ -1212,7 +1213,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
 	/*
 	 * The in the LWX case we don't want to do the load in the
-	 * delay slot. It cannot issue in the same cycle and may be
+	 * delay slot. It cannot issue in the same cycle and may be
 	 * speculative and unneeded.
 	 */
 	if (use_lwx_insns())
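
One detail worth noting from the build_huge_update_entries() hunk above: small_sequence = (HPAGE_SIZE >> 7) < 0x10000 simply tests whether the EntryLo increment for the second half of a huge page fits in a 16-bit MIPS immediate; only when it does not must the generated code spend an extra lui on the upper halfword. The snippet below is a minimal, self-contained sketch of that decision, not part of the commit; the 2 MB huge-page size is an assumed example value.

	#include <stdio.h>

	/* Assumed example value; the kernel derives HPAGE_SIZE from the config. */
	#define EXAMPLE_HPAGE_SIZE (2UL * 1024 * 1024)

	int main(void)
	{
		/* Increment between the two EntryLo values of a huge-page pair:
		 * (HPAGE_SIZE / 2) >> 12 shifted into the PFN field at bit 6. */
		unsigned long inc = EXAMPLE_HPAGE_SIZE >> 7;

		/* MIPS immediates are 16 bits: below 0x10000 one instruction suffices. */
		int small_sequence = inc < 0x10000;

		printf("increment = %#lx -> %s\n", inc,
		       small_sequence ? "fits a 16-bit immediate"
				      : "needs a lui for the upper halfword");
		return 0;
	}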