Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--	arch/mips/mm/tlbex.c | 148
1 file changed, 69 insertions, 79 deletions
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9ab0f907a52c..556cb4815770 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -136,7 +136,7 @@ static int scratchpad_offset(int i)
  * why; it's not an issue caused by the core RTL.
  *
  */
-static int __cpuinit m4kc_tlbp_war(void)
+static int m4kc_tlbp_war(void)
 {
 	return (current_cpu_data.processor_id & 0xffff00) ==
 	       (PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault)
 UASM_L_LA(_tlb_huge_update)
 #endif
 
-static int __cpuinitdata hazard_instance;
+static int hazard_instance;
 
-static void __cpuinit uasm_bgezl_hazard(u32 **p,
-					struct uasm_reloc **r,
-					int instance)
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
 {
 	switch (instance) {
 	case 0 ... 7:
@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p,
 	}
 }
 
-static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
-				       u32 **p,
-				       int instance)
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
 {
 	switch (instance) {
 	case 0 ... 7:
@@ -295,15 +291,15 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int count)
  * We deliberately chose a buffer size of 128, so we won't scribble
  * over anything important on overflow before we panic.
  */
-static u32 tlb_handler[128] __cpuinitdata;
+static u32 tlb_handler[128];
 
 /* simply assume worst case size for labels and relocs */
-static struct uasm_label labels[128] __cpuinitdata;
-static struct uasm_reloc relocs[128] __cpuinitdata;
+static struct uasm_label labels[128];
+static struct uasm_reloc relocs[128];
 
-static int check_for_high_segbits __cpuinitdata;
+static int check_for_high_segbits;
 
-static unsigned int kscratch_used_mask __cpuinitdata;
+static unsigned int kscratch_used_mask;
 
 static inline int __maybe_unused c0_kscratch(void)
 {
@@ -316,7 +312,7 @@ static inline int __maybe_unused c0_kscratch(void)
 	}
 }
 
-static int __cpuinit allocate_kscratch(void)
+static int allocate_kscratch(void)
 {
 	int r;
 	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
@@ -333,11 +329,11 @@ static int __cpuinit allocate_kscratch(void)
 	return r;
 }
 
-static int scratch_reg __cpuinitdata;
-static int pgd_reg __cpuinitdata;
+static int scratch_reg;
+static int pgd_reg;
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
-static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+static struct work_registers build_get_work_registers(u32 **p)
 {
 	struct work_registers r;
 
@@ -393,7 +389,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
 	return r;
 }
 
-static void __cpuinit build_restore_work_registers(u32 **p)
+static void build_restore_work_registers(u32 **p)
 {
 	if (scratch_reg >= 0) {
 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
@@ -418,7 +414,7 @@ extern unsigned long pgd_current[];
 /*
  * The R3000 TLB handler is simple.
  */
-static void __cpuinit build_r3000_tlb_refill_handler(void)
+static void build_r3000_tlb_refill_handler(void)
 {
 	long pgdc = (long)pgd_current;
 	u32 *p;
@@ -463,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
  * other one.To keep things simple, we first assume linear space,
  * then we relocate it to the final handler layout as needed.
  */
-static u32 final_handler[64] __cpuinitdata;
+static u32 final_handler[64];
 
 /*
  * Hazards
@@ -487,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata;
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
-static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
+static void __maybe_unused build_tlb_probe_entry(u32 **p)
 {
 	switch (current_cpu_type()) {
 	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
@@ -511,9 +507,9 @@
  */
 enum tlb_write_entry { tlb_random, tlb_indexed };
 
-static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
-				     struct uasm_reloc **r,
-				     enum tlb_write_entry wmode)
+static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+				  struct uasm_reloc **r,
+				  enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;
 
@@ -647,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }
 
-static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
-								  unsigned int reg)
+static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+							unsigned int reg)
 {
 	if (cpu_has_rixi) {
 		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -663,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 
-static __cpuinit void build_restore_pagemask(u32 **p,
-					     struct uasm_reloc **r,
-					     unsigned int tmp,
-					     enum label_id lid,
-					     int restore_scratch)
+static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+				   unsigned int tmp, enum label_id lid,
+				   int restore_scratch)
 {
 	if (restore_scratch) {
 		/* Reset default page size */
@@ -706,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p,
 	}
 }
 
-static __cpuinit void build_huge_tlb_write_entry(u32 **p,
-						 struct uasm_label **l,
-						 struct uasm_reloc **r,
-						 unsigned int tmp,
-						 enum tlb_write_entry wmode,
-						 int restore_scratch)
+static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
+				       struct uasm_reloc **r,
+				       unsigned int tmp,
+				       enum tlb_write_entry wmode,
+				       int restore_scratch)
 {
 	/* Set huge page tlb entry size */
 	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -726,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
 /*
  * Check if Huge PTE is present, if so then jump to LABEL.
  */
-static void __cpuinit
+static void
 build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
-		unsigned int pmd, int lid)
+		  unsigned int pmd, int lid)
 {
 	UASM_i_LW(p, tmp, 0, pmd);
 	if (use_bbit_insns()) {
@@ -739,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 	}
 }
 
-static __cpuinit void build_huge_update_entries(u32 **p,
-						unsigned int pte,
-						unsigned int tmp)
+static void build_huge_update_entries(u32 **p, unsigned int pte,
+				      unsigned int tmp)
 {
 	int small_sequence;
 
@@ -771,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p,
 	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
-static __cpuinit void build_huge_handler_tail(u32 **p,
-					      struct uasm_reloc **r,
-					      struct uasm_label **l,
-					      unsigned int pte,
-					      unsigned int ptr)
+static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+				    struct uasm_label **l,
+				    unsigned int pte,
+				    unsigned int ptr)
 {
 #ifdef CONFIG_SMP
 	UASM_i_SC(p, pte, 0, ptr);
@@ -794,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void __cpuinit
+static void
 build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 unsigned int tmp, unsigned int ptr)
 {
@@ -886,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
-static void __cpuinit
+static void
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 			unsigned int bvaddr, unsigned int ptr,
 			enum vmalloc64_mode mode)
@@ -956,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
-static void __cpuinit __maybe_unused
+static void __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	long pgdc = (long)pgd_current;
@@ -991,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 
 #endif /* !CONFIG_64BIT */
 
-static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
+static void build_adjust_context(u32 **p, unsigned int ctx)
 {
 	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1017,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1042,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
 
-static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
-					unsigned int ptep)
+static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	/*
 	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
@@ -1104,7 +1094,7 @@ struct mips_huge_tlb_info {
 	int restore_scratch;
 };
 
-static struct mips_huge_tlb_info __cpuinit
+static struct mips_huge_tlb_info
 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 			       struct uasm_reloc **r, unsigned int tmp,
 			       unsigned int ptr, int c0_scratch_reg)
@@ -1282,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
  */
 #define MIPS64_REFILL_INSNS 32
 
-static void __cpuinit build_r4000_tlb_refill_handler(void)
+static void build_r4000_tlb_refill_handler(void)
 {
 	u32 *p = tlb_handler;
 	struct uasm_label *l = labels;
@@ -1462,11 +1452,11 @@ extern u32 handle_tlbm[], handle_tlbm_end[];
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
-static void __cpuinit build_r4000_setup_pgd(void)
+static void build_r4000_setup_pgd(void)
 {
 	const int a0 = 4;
 	const int a1 = 5;
-	u32 *p = tlbmiss_handler_setup_pgd_array;
+	u32 *p = tlbmiss_handler_setup_pgd;
 	const int tlbmiss_handler_setup_pgd_size =
 		tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
 	struct uasm_label *l = labels;
@@ -1513,7 +1503,7 @@ static void __cpuinit build_r4000_setup_pgd(void)
 }
 #endif
 
-static void __cpuinit
+static void
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
@@ -1533,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 #endif
 }
 
-static void __cpuinit
+static void
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 	unsigned int mode)
 {
@@ -1593,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 * the page table where this PTE is located, PTE will be re-loaded
 * with it's original value.
 */
-static void __cpuinit
+static void
 build_pte_present(u32 **p, struct uasm_reloc **r,
 		  int pte, int ptr, int scratch, enum label_id lid)
 {
@@ -1621,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 }
 
 /* Make PTE valid, store result in PTR. */
-static void __cpuinit
+static void
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
 		 unsigned int ptr)
 {
@@ -1634,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
-static void __cpuinit
+static void
 build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   unsigned int pte, unsigned int ptr, int scratch,
 		   enum label_id lid)
@@ -1654,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
 /* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
-static void __cpuinit
+static void
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
 		 unsigned int ptr)
 {
@@ -1668,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
-static void __cpuinit
+static void
 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		     unsigned int pte, unsigned int ptr, int scratch,
 		     enum label_id lid)
@@ -1697,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
  * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
-static void __cpuinit
+static void
 build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
 {
 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -1713,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
  * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
-static void __cpuinit
+static void
 build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 			     struct uasm_reloc **r, unsigned int pte,
 			     unsigned int tmp)
@@ -1731,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
 	uasm_i_rfe(p); /* branch delay */
 }
 
-static void __cpuinit
+static void
 build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 				   unsigned int ptr)
 {
@@ -1751,7 +1741,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
 	uasm_i_tlbp(p); /* load delay */
 }
 
-static void __cpuinit build_r3000_tlb_load_handler(void)
+static void build_r3000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -1782,7 +1772,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
 	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
 }
 
-static void __cpuinit build_r3000_tlb_store_handler(void)
+static void build_r3000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -1803,7 +1793,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
-	if (p >= handle_tlbs)
+	if (p >= handle_tlbs_end)
 		panic("TLB store handler fastpath space exceeded");
 
 	uasm_resolve_relocs(relocs, labels);
@@ -1813,7 +1803,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
 }
 
-static void __cpuinit build_r3000_tlb_modify_handler(void)
+static void build_r3000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -1848,7 +1838,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static struct work_registers __cpuinit
+static struct work_registers
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 				   struct uasm_reloc **r)
 {
@@ -1884,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	return wr;
 }
 
-static void __cpuinit
+static void
 build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 				   struct uasm_reloc **r, unsigned int tmp,
 				   unsigned int ptr)
@@ -1902,7 +1892,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 #endif
 }
 
-static void __cpuinit build_r4000_tlb_load_handler(void)
+static void build_r4000_tlb_load_handler(void)
 {
 	u32 *p = handle_tlbl;
 	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -2085,7 +2075,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
 }
 
-static void __cpuinit build_r4000_tlb_store_handler(void)
+static void build_r4000_tlb_store_handler(void)
 {
 	u32 *p = handle_tlbs;
 	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -2140,7 +2130,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
 }
 
-static void __cpuinit build_r4000_tlb_modify_handler(void)
+static void build_r4000_tlb_modify_handler(void)
 {
 	u32 *p = handle_tlbm;
 	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -2196,7 +2186,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
 }
 
-static void __cpuinit flush_tlb_handlers(void)
+static void flush_tlb_handlers(void)
 {
 	local_flush_icache_range((unsigned long)handle_tlbl,
 				 (unsigned long)handle_tlbl_end);
@@ -2210,7 +2200,7 @@ static void flush_tlb_handlers(void)
 #endif
 }
 
-void __cpuinit build_tlb_refill_handler(void)
+void build_tlb_refill_handler(void)
 {
 	/*
 	 * The refill handler is generated per-CPU, multi-node systems
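
Note on the pattern above: nearly every hunk simply drops a __cpuinit/__cpuinitdata annotation; the two exceptions are the one-line fixes visible in the hunks themselves (tlbmiss_handler_setup_pgd_array -> tlbmiss_handler_setup_pgd, and the overflow check now comparing against handle_tlbs_end). As a minimal sketch of why dropping the annotations cannot change generated code -- assuming the historical definitions from include/linux/init.h, which had already been reduced to no-ops before this per-architecture cleanup -- consider:

/* Illustrative sketch only; not the kernel's verbatim init.h.
 * Historically, __cpuinit/__cpuinitdata placed CPU-bringup-only
 * functions and data in sections that could be discarded when CPU
 * hotplug was disabled. Once the macros expand to nothing, the
 * annotated and unannotated forms compile identically, so deleting
 * the markers (as this diff does) is a pure textual cleanup.
 */
#define __cpuinit	/* once a section attribute, now empty */
#define __cpuinitdata	/* likewise for data */

static int __cpuinitdata old_style_data;	/* before this diff */
static int new_style_data;			/* after this diff */

static int __cpuinit old_style(void) { return old_style_data; }
static int new_style(void) { return new_style_data; }

int main(void)
{
	/* both pairs generate the same object code once the macros are empty */
	return old_style() + new_style();
}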