Diffstat (limited to 'arch/avr32/mm')
-rw-r--r--   arch/avr32/mm/Makefile       |   6
-rw-r--r--   arch/avr32/mm/cache.c        | 150
-rw-r--r--   arch/avr32/mm/clear_page.S   |  25
-rw-r--r--   arch/avr32/mm/copy_page.S    |  28
-rw-r--r--   arch/avr32/mm/dma-coherent.c | 139
-rw-r--r--   arch/avr32/mm/fault.c        | 315
-rw-r--r--   arch/avr32/mm/init.c         | 480
-rw-r--r--   arch/avr32/mm/ioremap.c      | 197
-rw-r--r--   arch/avr32/mm/tlb.c          | 378
9 files changed, 1718 insertions(+), 0 deletions(-)
diff --git a/arch/avr32/mm/Makefile b/arch/avr32/mm/Makefile new file mode 100644 index 000000000000..0066491f90d4 --- /dev/null +++ b/arch/avr32/mm/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Linux/AVR32 kernel. +# + +obj-y += init.o clear_page.o copy_page.o dma-coherent.o +obj-y += ioremap.o cache.o fault.o tlb.o diff --git a/arch/avr32/mm/cache.c b/arch/avr32/mm/cache.c new file mode 100644 index 000000000000..450515b245a0 --- /dev/null +++ b/arch/avr32/mm/cache.c @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/highmem.h> +#include <linux/unistd.h> + +#include <asm/cacheflush.h> +#include <asm/cachectl.h> +#include <asm/processor.h> +#include <asm/uaccess.h> + +/* + * If you attempt to flush anything more than this, you need superuser + * privileges. The value is completely arbitrary. + */ +#define CACHEFLUSH_MAX_LEN 1024 + +void invalidate_dcache_region(void *start, size_t size) +{ + unsigned long v, begin, end, linesz; + + linesz = boot_cpu_data.dcache.linesz; + + //printk("invalidate dcache: %p + %u\n", start, size); + + /* You asked for it, you got it */ + begin = (unsigned long)start & ~(linesz - 1); + end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); + + for (v = begin; v < end; v += linesz) + invalidate_dcache_line((void *)v); +} + +void clean_dcache_region(void *start, size_t size) +{ + unsigned long v, begin, end, linesz; + + linesz = boot_cpu_data.dcache.linesz; + begin = (unsigned long)start & ~(linesz - 1); + end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); + + for (v = begin; v < end; v += linesz) + clean_dcache_line((void *)v); + flush_write_buffer(); +} + +void flush_dcache_region(void *start, size_t size) +{ + unsigned long v, begin, end, linesz; + + linesz = boot_cpu_data.dcache.linesz; + begin = (unsigned long)start & ~(linesz - 1); + end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); + + for (v = begin; v < end; v += linesz) + flush_dcache_line((void *)v); + flush_write_buffer(); +} + +void invalidate_icache_region(void *start, size_t size) +{ + unsigned long v, begin, end, linesz; + + linesz = boot_cpu_data.icache.linesz; + begin = (unsigned long)start & ~(linesz - 1); + end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); + + for (v = begin; v < end; v += linesz) + invalidate_icache_line((void *)v); +} + +static inline void __flush_icache_range(unsigned long start, unsigned long end) +{ + unsigned long v, linesz; + + linesz = boot_cpu_data.dcache.linesz; + for (v = start; v < end; v += linesz) { + clean_dcache_line((void *)v); + invalidate_icache_line((void *)v); + } + + flush_write_buffer(); +} + +/* + * This one is called after a module has been loaded. + */ +void flush_icache_range(unsigned long start, unsigned long end) +{ + unsigned long linesz; + + linesz = boot_cpu_data.dcache.linesz; + __flush_icache_range(start & ~(linesz - 1), + (end + linesz - 1) & ~(linesz - 1)); +} + +/* + * This one is called from do_no_page(), do_swap_page() and install_page(). 
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+	if (vma->vm_flags & VM_EXEC) {
+		void *v = kmap(page);
+		__flush_icache_range((unsigned long)v, (unsigned long)v + PAGE_SIZE);
+		kunmap(page);
+	}
+}
+
+/*
+ * This one is used by copy_to_user_page()
+ */
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len)
+{
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_range(addr, addr + len);
+}
+
+asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
+{
+	int ret;
+
+	if (len > CACHEFLUSH_MAX_LEN) {
+		ret = -EPERM;
+		if (!capable(CAP_SYS_ADMIN))
+			goto out;
+	}
+
+	ret = -EFAULT;
+	if (!access_ok(VERIFY_WRITE, addr, len))
+		goto out;
+
+	switch (operation) {
+	case CACHE_IFLUSH:
+		flush_icache_range((unsigned long)addr,
+				   (unsigned long)addr + len);
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+out:
+	return ret;
+} diff --git a/arch/avr32/mm/clear_page.S b/arch/avr32/mm/clear_page.S new file mode 100644 index 000000000000..5d70dca00699 --- /dev/null +++ b/arch/avr32/mm/clear_page.S @@ -0,0 +1,25 @@ +/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+/*
+ * clear_page
+ * r12: P1 address (to)
+ */
+	.text
+	.global	clear_page
+clear_page:
+	sub	r9, r12, -PAGE_SIZE
+	mov	r10, 0
+	mov	r11, 0
+0:	st.d	r12++, r10
+	cp	r12, r9
+	brne	0b
+	mov	pc, lr diff --git a/arch/avr32/mm/copy_page.S b/arch/avr32/mm/copy_page.S new file mode 100644 index 000000000000..c2b3752946b8 --- /dev/null +++ b/arch/avr32/mm/copy_page.S @@ -0,0 +1,28 @@ +/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+/*
+ * copy_page
+ *
+ * r12		to (P1 address)
+ * r11		from (P1 address)
+ * r8-r10	scratch
+ */
+	.text
+	.global	copy_page
+copy_page:
+	sub	r10, r11, -(1 << PAGE_SHIFT)
+	/* pref	r11[0] */
+1:	/* pref	r11[8] */
+	ld.d	r8, r11++
+	st.d	r12++, r8
+	cp	r11, r10
+	brlo	1b
+	mov	pc, lr diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c new file mode 100644 index 000000000000..44ab8a7bdae2 --- /dev/null +++ b/arch/avr32/mm/dma-coherent.c @@ -0,0 +1,139 @@ +/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#include <linux/dma-mapping.h> + +#include <asm/addrspace.h> +#include <asm/cacheflush.h> + +void dma_cache_sync(void *vaddr, size_t size, int direction) +{ + /* + * No need to sync an uncached area + */ + if (PXSEG(vaddr) == P2SEG) + return; + + switch (direction) { + case DMA_FROM_DEVICE: /* invalidate only */ + dma_cache_inv(vaddr, size); + break; + case DMA_TO_DEVICE: /* writeback only */ + dma_cache_wback(vaddr, size); + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + dma_cache_wback_inv(vaddr, size); + break; + default: + BUG(); + } +} +EXPORT_SYMBOL(dma_cache_sync); + +static struct page *__dma_alloc(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp) +{ + struct page *page, *free, *end; + int order; + + size = PAGE_ALIGN(size); + order = get_order(size); + + page = alloc_pages(gfp, order); + if (!page) + return NULL; + split_page(page, order); + + /* + * When accessing physical memory with valid cache data, we + * get a cache hit even if the virtual memory region is marked + * as uncached. + * + * Since the memory is newly allocated, there is no point in + * doing a writeback. If the previous owner cares, he should + * have flushed the cache before releasing the memory. + */ + invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size); + + *handle = page_to_bus(page); + free = page + (size >> PAGE_SHIFT); + end = page + (1 << order); + + /* + * Free any unused pages + */ + while (free < end) { + __free_page(free); + free++; + } + + return page; +} + +static void __dma_free(struct device *dev, size_t size, + struct page *page, dma_addr_t handle) +{ + struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT); + + while (page < end) + __free_page(page++); +} + +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp) +{ + struct page *page; + void *ret = NULL; + + page = __dma_alloc(dev, size, handle, gfp); + if (page) + ret = phys_to_uncached(page_to_phys(page)); + + return ret; +} +EXPORT_SYMBOL(dma_alloc_coherent); + +void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t handle) +{ + void *addr = phys_to_cached(uncached_to_phys(cpu_addr)); + struct page *page; + + pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n", + cpu_addr, (unsigned long)handle, (unsigned)size); + BUG_ON(!virt_addr_valid(addr)); + page = virt_to_page(addr); + __dma_free(dev, size, page, handle); +} +EXPORT_SYMBOL(dma_free_coherent); + +#if 0 +void *dma_alloc_writecombine(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp) +{ + struct page *page; + + page = __dma_alloc(dev, size, handle, gfp); + + /* Now, map the page into P3 with write-combining turned on */ + return __ioremap(page_to_phys(page), size, _PAGE_BUFFER); +} +EXPORT_SYMBOL(dma_alloc_writecombine); + +void dma_free_writecombine(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t handle) +{ + struct page *page; + + iounmap(cpu_addr); + + page = bus_to_page(handle); + __dma_free(dev, size, page, handle); +} +EXPORT_SYMBOL(dma_free_writecombine); +#endif diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c new file mode 100644 index 000000000000..678557260a35 --- /dev/null +++ b/arch/avr32/mm/fault.c @@ -0,0 +1,315 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * Based on linux/arch/sh/mm/fault.c: + * Copyright (C) 1999 Niibe Yutaka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by 
the Free Software Foundation. + */ + +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/pagemap.h> + +#include <asm/kdebug.h> +#include <asm/mmu_context.h> +#include <asm/sysreg.h> +#include <asm/uaccess.h> +#include <asm/tlb.h> + +#ifdef DEBUG +static void dump_code(unsigned long pc) +{ + char *p = (char *)pc; + char val; + int i; + + + printk(KERN_DEBUG "Code:"); + for (i = 0; i < 16; i++) { + if (__get_user(val, p + i)) + break; + printk(" %02x", val); + } + printk("\n"); +} +#endif + +#ifdef CONFIG_KPROBES +ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); + +/* Hook to register for page fault notifications */ +int register_page_fault_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); +} + +int unregister_page_fault_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); +} + +static inline int notify_page_fault(enum die_val val, struct pt_regs *regs, + int trap, int sig) +{ + struct die_args args = { + .regs = regs, + .trapnr = trap, + }; + return atomic_notifier_call_chain(¬ify_page_fault_chain, val, &args); +} +#else +static inline int notify_page_fault(enum die_val val, struct pt_regs *regs, + int trap, int sig) +{ + return NOTIFY_DONE; +} +#endif + +/* + * This routine handles page faults. It determines the address and the + * problem, and then passes it off to one of the appropriate routines. + * + * ecr is the Exception Cause Register. Possible values are: + * 5: Page not found (instruction access) + * 6: Protection fault (instruction access) + * 12: Page not found (read access) + * 13: Page not found (write access) + * 14: Protection fault (read access) + * 15: Protection fault (write access) + */ +asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) +{ + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct *vma; + const struct exception_table_entry *fixup; + unsigned long address; + unsigned long page; + int writeaccess = 0; + + if (notify_page_fault(DIE_PAGE_FAULT, regs, + ecr, SIGSEGV) == NOTIFY_STOP) + return; + + address = sysreg_read(TLBEAR); + + tsk = current; + mm = tsk->mm; + + /* + * If we're in an interrupt or have no user context, we must + * not take the fault... + */ + if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) + goto no_context; + + local_irq_enable(); + + down_read(&mm->mmap_sem); + + vma = find_vma(mm, address); + if (!vma) + goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; + + /* + * Ok, we have a good vm_area for this memory access, so we + * can handle it... + */ +good_area: + //pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags); + switch (ecr) { + case ECR_PROTECTION_X: + case ECR_TLB_MISS_X: + if (!(vma->vm_flags & VM_EXEC)) + goto bad_area; + break; + case ECR_PROTECTION_R: + case ECR_TLB_MISS_R: + if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + goto bad_area; + break; + case ECR_PROTECTION_W: + case ECR_TLB_MISS_W: + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + writeaccess = 1; + break; + default: + panic("Unhandled case %lu in do_page_fault!", ecr); + } + + /* + * If for any reason at all we couldn't handle the fault, make + * sure we exit gracefully rather than endlessly redo the + * fault. 
+ */ +survive: + switch (handle_mm_fault(mm, vma, address, writeaccess)) { + case VM_FAULT_MINOR: + tsk->min_flt++; + break; + case VM_FAULT_MAJOR: + tsk->maj_flt++; + break; + case VM_FAULT_SIGBUS: + goto do_sigbus; + case VM_FAULT_OOM: + goto out_of_memory; + default: + BUG(); + } + + up_read(&mm->mmap_sem); + return; + + /* + * Something tried to access memory that isn't in our memory + * map. Fix it, but check if it's kernel or user first... + */ +bad_area: + pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n", + tsk->comm, tsk->pid, address, ecr); + + up_read(&mm->mmap_sem); + + if (user_mode(regs)) { + /* Hmm...we have to pass address and ecr somehow... */ + /* tsk->thread.address = address; + tsk->thread.error_code = ecr; */ +#ifdef DEBUG + show_regs(regs); + dump_code(regs->pc); + + page = sysreg_read(PTBR); + printk("ptbr = %08lx", page); + if (page) { + page = ((unsigned long *)page)[address >> 22]; + printk(" pgd = %08lx", page); + if (page & _PAGE_PRESENT) { + page &= PAGE_MASK; + address &= 0x003ff000; + page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; + printk(" pte = %08lx\n", page); + } + } +#endif + pr_debug("Sending SIGSEGV to PID %d...\n", + tsk->pid); + force_sig(SIGSEGV, tsk); + return; + } + +no_context: + pr_debug("No context\n"); + + /* Are we prepared to handle this kernel fault? */ + fixup = search_exception_tables(regs->pc); + if (fixup) { + regs->pc = fixup->fixup; + pr_debug("Found fixup at %08lx\n", fixup->fixup); + return; + } + + /* + * Oops. The kernel tried to access some bad page. We'll have + * to terminate things with extreme prejudice. + */ + if (address < PAGE_SIZE) + printk(KERN_ALERT + "Unable to handle kernel NULL pointer dereference"); + else + printk(KERN_ALERT + "Unable to handle kernel paging request"); + printk(" at virtual address %08lx\n", address); + printk(KERN_ALERT "pc = %08lx\n", regs->pc); + + page = sysreg_read(PTBR); + printk(KERN_ALERT "ptbr = %08lx", page); + if (page) { + page = ((unsigned long *)page)[address >> 22]; + printk(" pgd = %08lx", page); + if (page & _PAGE_PRESENT) { + page &= PAGE_MASK; + address &= 0x003ff000; + page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; + printk(" pte = %08lx\n", page); + } + } + die("\nOops", regs, ecr); + do_exit(SIGKILL); + + /* + * We ran out of memory, or some other thing happened to us + * that made us unable to handle the page fault gracefully. + */ +out_of_memory: + printk("Out of memory\n"); + up_read(&mm->mmap_sem); + if (current->pid == 1) { + yield(); + down_read(&mm->mmap_sem); + goto survive; + } + printk("VM: Killing process %s\n", tsk->comm); + if (user_mode(regs)) + do_exit(SIGKILL); + goto no_context; + +do_sigbus: + up_read(&mm->mmap_sem); + + /* + * Send a sigbus, regardless of whether we were in kernel or + * user mode. + */ + /* address, error_code, trap_no, ... */ +#ifdef DEBUG + show_regs(regs); + dump_code(regs->pc); +#endif + pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid); + force_sig(SIGBUS, tsk); + + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) + goto no_context; +} + +asmlinkage void do_bus_error(unsigned long addr, int write_access, + struct pt_regs *regs) +{ + printk(KERN_ALERT + "Bus error at physical address 0x%08lx (%s access)\n", + addr, write_access ? 
"write" : "read"); + printk(KERN_INFO "DTLB dump:\n"); + dump_dtlb(); + die("Bus Error", regs, write_access); + do_exit(SIGKILL); +} + +/* + * This functionality is currently not possible to implement because + * we're using segmentation to ensure a fixed mapping of the kernel + * virtual address space. + * + * It would be possible to implement this, but it would require us to + * disable segmentation at startup and load the kernel mappings into + * the TLB like any other pages. There will be lots of trickery to + * avoid recursive invocation of the TLB miss handler, though... + */ +#ifdef CONFIG_DEBUG_PAGEALLOC +void kernel_map_pages(struct page *page, int numpages, int enable) +{ + +} +EXPORT_SYMBOL(kernel_map_pages); +#endif diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c new file mode 100644 index 000000000000..3e6c41039808 --- /dev/null +++ b/arch/avr32/mm/init.c @@ -0,0 +1,480 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/swap.h> +#include <linux/init.h> +#include <linux/initrd.h> +#include <linux/mmzone.h> +#include <linux/bootmem.h> +#include <linux/pagemap.h> +#include <linux/pfn.h> +#include <linux/nodemask.h> + +#include <asm/page.h> +#include <asm/mmu_context.h> +#include <asm/tlb.h> +#include <asm/io.h> +#include <asm/dma.h> +#include <asm/setup.h> +#include <asm/sections.h> + +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); + +pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +struct page *empty_zero_page; + +/* + * Cache of MMU context last used. + */ +unsigned long mmu_context_cache = NO_CONTEXT; + +#define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT) +#define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn) + +void show_mem(void) +{ + int total = 0, reserved = 0, cached = 0; + int slab = 0, free = 0, shared = 0; + pg_data_t *pgdat; + + printk("Mem-info:\n"); + show_free_areas(); + + for_each_online_pgdat(pgdat) { + struct page *page, *end; + + page = pgdat->node_mem_map; + end = page + pgdat->node_spanned_pages; + + do { + total++; + if (PageReserved(page)) + reserved++; + else if (PageSwapCache(page)) + cached++; + else if (PageSlab(page)) + slab++; + else if (!page_count(page)) + free++; + else + shared += page_count(page) - 1; + page++; + } while (page < end); + } + + printk ("%d pages of RAM\n", total); + printk ("%d free pages\n", free); + printk ("%d reserved pages\n", reserved); + printk ("%d slab pages\n", slab); + printk ("%d pages shared\n", shared); + printk ("%d pages swap cached\n", cached); +} + +static void __init print_memory_map(const char *what, + struct tag_mem_range *mem) +{ + printk ("%s:\n", what); + for (; mem; mem = mem->next) { + printk (" %08lx - %08lx\n", + (unsigned long)mem->addr, + (unsigned long)(mem->addr + mem->size)); + } +} + +#define MAX_LOWMEM HIGHMEM_START +#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM) + +/* + * Sort a list of memory regions in-place by ascending address. + * + * We're using bubble sort because we only have singly linked lists + * with few elements. 
+ */ +static void __init sort_mem_list(struct tag_mem_range **pmem) +{ + int done; + struct tag_mem_range **a, **b; + + if (!*pmem) + return; + + do { + done = 1; + a = pmem, b = &(*pmem)->next; + while (*b) { + if ((*a)->addr > (*b)->addr) { + struct tag_mem_range *tmp; + tmp = (*b)->next; + (*b)->next = *a; + *a = *b; + *b = tmp; + done = 0; + } + a = &(*a)->next; + b = &(*a)->next; + } + } while (!done); +} + +/* + * Find a free memory region large enough for storing the + * bootmem bitmap. + */ +static unsigned long __init +find_bootmap_pfn(const struct tag_mem_range *mem) +{ + unsigned long bootmap_pages, bootmap_len; + unsigned long node_pages = PFN_UP(mem->size); + unsigned long bootmap_addr = mem->addr; + struct tag_mem_range *reserved = mem_reserved; + struct tag_mem_range *ramdisk = mem_ramdisk; + unsigned long kern_start = virt_to_phys(_stext); + unsigned long kern_end = virt_to_phys(_end); + + bootmap_pages = bootmem_bootmap_pages(node_pages); + bootmap_len = bootmap_pages << PAGE_SHIFT; + + /* + * Find a large enough region without reserved pages for + * storing the bootmem bitmap. We can take advantage of the + * fact that all lists have been sorted. + * + * We have to check explicitly reserved regions as well as the + * kernel image and any RAMDISK images... + * + * Oh, and we have to make sure we don't overwrite the taglist + * since we're going to use it until the bootmem allocator is + * fully up and running. + */ + while (1) { + if ((bootmap_addr < kern_end) && + ((bootmap_addr + bootmap_len) > kern_start)) + bootmap_addr = kern_end; + + while (reserved && + (bootmap_addr >= (reserved->addr + reserved->size))) + reserved = reserved->next; + + if (reserved && + ((bootmap_addr + bootmap_len) >= reserved->addr)) { + bootmap_addr = reserved->addr + reserved->size; + continue; + } + + while (ramdisk && + (bootmap_addr >= (ramdisk->addr + ramdisk->size))) + ramdisk = ramdisk->next; + + if (!ramdisk || + ((bootmap_addr + bootmap_len) < ramdisk->addr)) + break; + + bootmap_addr = ramdisk->addr + ramdisk->size; + } + + if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size)) + return ~0UL; + + return PFN_UP(bootmap_addr); +} + +void __init setup_bootmem(void) +{ + unsigned bootmap_size; + unsigned long first_pfn, bootmap_pfn, pages; + unsigned long max_pfn, max_low_pfn; + unsigned long kern_start = virt_to_phys(_stext); + unsigned long kern_end = virt_to_phys(_end); + unsigned node = 0; + struct tag_mem_range *bank, *res; + + sort_mem_list(&mem_phys); + sort_mem_list(&mem_reserved); + + print_memory_map("Physical memory", mem_phys); + print_memory_map("Reserved memory", mem_reserved); + + nodes_clear(node_online_map); + + if (mem_ramdisk) { +#ifdef CONFIG_BLK_DEV_INITRD + initrd_start = __va(mem_ramdisk->addr); + initrd_end = initrd_start + mem_ramdisk->size; + + print_memory_map("RAMDISK images", mem_ramdisk); + if (mem_ramdisk->next) + printk(KERN_WARNING + "Warning: Only the first RAMDISK image " + "will be used\n"); + sort_mem_list(&mem_ramdisk); +#else + printk(KERN_WARNING "RAM disk image present, but " + "no initrd support in kernel!\n"); +#endif + } + + if (mem_phys->next) + printk(KERN_WARNING "Only using first memory bank\n"); + + for (bank = mem_phys; bank; bank = NULL) { + first_pfn = PFN_UP(bank->addr); + max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size); + bootmap_pfn = find_bootmap_pfn(bank); + if (bootmap_pfn > max_pfn) + panic("No space for bootmem bitmap!\n"); + + if (max_low_pfn > MAX_LOWMEM_PFN) { + max_low_pfn = MAX_LOWMEM_PFN; +#ifndef 
CONFIG_HIGHMEM + /* + * Lowmem is memory that can be addressed + * directly through P1/P2 + */ + printk(KERN_WARNING + "Node %u: Only %ld MiB of memory will be used.\n", + node, MAX_LOWMEM >> 20); + printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); +#else +#error HIGHMEM is not supported by AVR32 yet +#endif + } + + /* Initialize the boot-time allocator with low memory only. */ + bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn, + first_pfn, max_low_pfn); + + printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n", + node, NODE_DATA(node)->bdata, + NODE_DATA(node)->bdata->node_bootmem_map); + + /* + * Register fully available RAM pages with the bootmem + * allocator. + */ + pages = max_low_pfn - first_pfn; + free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn), + PFN_PHYS(pages)); + + /* + * Reserve space for the kernel image (if present in + * this node)... + */ + if ((kern_start >= PFN_PHYS(first_pfn)) && + (kern_start < PFN_PHYS(max_pfn))) { + printk("Node %u: Kernel image %08lx - %08lx\n", + node, kern_start, kern_end); + reserve_bootmem_node(NODE_DATA(node), kern_start, + kern_end - kern_start); + } + + /* ...the bootmem bitmap... */ + reserve_bootmem_node(NODE_DATA(node), + PFN_PHYS(bootmap_pfn), + bootmap_size); + + /* ...any RAMDISK images... */ + for (res = mem_ramdisk; res; res = res->next) { + if (res->addr > PFN_PHYS(max_pfn)) + break; + + if (res->addr >= PFN_PHYS(first_pfn)) { + printk("Node %u: RAMDISK %08lx - %08lx\n", + node, + (unsigned long)res->addr, + (unsigned long)(res->addr + res->size)); + reserve_bootmem_node(NODE_DATA(node), + res->addr, res->size); + } + } + + /* ...and any other reserved regions. */ + for (res = mem_reserved; res; res = res->next) { + if (res->addr > PFN_PHYS(max_pfn)) + break; + + if (res->addr >= PFN_PHYS(first_pfn)) { + printk("Node %u: Reserved %08lx - %08lx\n", + node, + (unsigned long)res->addr, + (unsigned long)(res->addr + res->size)); + reserve_bootmem_node(NODE_DATA(node), + res->addr, res->size); + } + } + + node_set_online(node); + } +} + +/* + * paging_init() sets up the page tables + * + * This routine also unmaps the page at virtual kernel address 0, so + * that we can trap those pesky NULL-reference errors in the kernel. + */ +void __init paging_init(void) +{ + extern unsigned long _evba; + void *zero_page; + int nid; + + /* + * Make sure we can handle exceptions before enabling + * paging. Not that we should ever _get_ any exceptions this + * early, but you never know... + */ + printk("Exception vectors start at %p\n", &_evba); + sysreg_write(EVBA, (unsigned long)&_evba); + + /* + * Since we are ready to handle exceptions now, we should let + * the CPU generate them... + */ + __asm__ __volatile__ ("csrf %0" : : "i"(SR_EM_BIT)); + + /* + * Allocate the zero page. The allocator will panic if it + * can't satisfy the request, so no need to check. 
+ */ + zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0), + PAGE_SIZE); + + { + pgd_t *pg_dir; + int i; + + pg_dir = swapper_pg_dir; + sysreg_write(PTBR, (unsigned long)pg_dir); + + for (i = 0; i < PTRS_PER_PGD; i++) + pgd_val(pg_dir[i]) = 0; + + enable_mmu(); + printk ("CPU: Paging enabled\n"); + } + + for_each_online_node(nid) { + pg_data_t *pgdat = NODE_DATA(nid); + unsigned long zones_size[MAX_NR_ZONES]; + unsigned long low, start_pfn; + + start_pfn = pgdat->bdata->node_boot_start; + start_pfn >>= PAGE_SHIFT; + low = pgdat->bdata->node_low_pfn; + + memset(zones_size, 0, sizeof(zones_size)); + zones_size[ZONE_NORMAL] = low - start_pfn; + + printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n", + nid, start_pfn, low); + + free_area_init_node(nid, pgdat, zones_size, start_pfn, NULL); + + printk("Node %u: mem_map starts at %p\n", + pgdat->node_id, pgdat->node_mem_map); + } + + mem_map = NODE_DATA(0)->node_mem_map; + + memset(zero_page, 0, PAGE_SIZE); + empty_zero_page = virt_to_page(zero_page); + flush_dcache_page(empty_zero_page); +} + +void __init mem_init(void) +{ + int codesize, reservedpages, datasize, initsize; + int nid, i; + + reservedpages = 0; + high_memory = NULL; + + /* this will put all low memory onto the freelists */ + for_each_online_node(nid) { + pg_data_t *pgdat = NODE_DATA(nid); + unsigned long node_pages = 0; + void *node_high_memory; + + num_physpages += pgdat->node_present_pages; + + if (pgdat->node_spanned_pages != 0) + node_pages = free_all_bootmem_node(pgdat); + + totalram_pages += node_pages; + + for (i = 0; i < node_pages; i++) + if (PageReserved(pgdat->node_mem_map + i)) + reservedpages++; + + node_high_memory = (void *)((pgdat->node_start_pfn + + pgdat->node_spanned_pages) + << PAGE_SHIFT); + if (node_high_memory > high_memory) + high_memory = node_high_memory; + } + + max_mapnr = MAP_NR(high_memory); + + codesize = (unsigned long)_etext - (unsigned long)_text; + datasize = (unsigned long)_edata - (unsigned long)_data; + initsize = (unsigned long)__init_end - (unsigned long)__init_begin; + + printk ("Memory: %luk/%luk available (%dk kernel code, " + "%dk reserved, %dk data, %dk init)\n", + (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10), + totalram_pages << (PAGE_SHIFT - 10), + codesize >> 10, + reservedpages << (PAGE_SHIFT - 10), + datasize >> 10, + initsize >> 10); +} + +static inline void free_area(unsigned long addr, unsigned long end, char *s) +{ + unsigned int size = (end - addr) >> 10; + + for (; addr < end; addr += PAGE_SIZE) { + struct page *page = virt_to_page(addr); + ClearPageReserved(page); + init_page_count(page); + free_page(addr); + totalram_pages++; + } + + if (size && s) + printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n", + s, size, end - (size << 10), end); +} + +void free_initmem(void) +{ + free_area((unsigned long)__init_begin, (unsigned long)__init_end, + "init"); +} + +#ifdef CONFIG_BLK_DEV_INITRD + +static int keep_initrd; + +void free_initrd_mem(unsigned long start, unsigned long end) +{ + if (!keep_initrd) + free_area(start, end, "initrd"); +} + +static int __init keepinitrd_setup(char *__unused) +{ + keep_initrd = 1; + return 1; +} + +__setup("keepinitrd", keepinitrd_setup); +#endif diff --git a/arch/avr32/mm/ioremap.c b/arch/avr32/mm/ioremap.c new file mode 100644 index 000000000000..536021877df6 --- /dev/null +++ b/arch/avr32/mm/ioremap.c @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU 
General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/vmalloc.h> +#include <linux/module.h> + +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> +#include <asm/addrspace.h> + +static inline int remap_area_pte(pte_t *pte, unsigned long address, + unsigned long end, unsigned long phys_addr, + pgprot_t prot) +{ + unsigned long pfn; + + pfn = phys_addr >> PAGE_SHIFT; + do { + WARN_ON(!pte_none(*pte)); + + set_pte(pte, pfn_pte(pfn, prot)); + address += PAGE_SIZE; + pfn++; + pte++; + } while (address && (address < end)); + + return 0; +} + +static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, + unsigned long end, unsigned long phys_addr, + pgprot_t prot) +{ + unsigned long next; + + phys_addr -= address; + + do { + pte_t *pte = pte_alloc_kernel(pmd, address); + if (!pte) + return -ENOMEM; + + next = (address + PMD_SIZE) & PMD_MASK; + if (remap_area_pte(pte, address, next, + address + phys_addr, prot)) + return -ENOMEM; + + address = next; + pmd++; + } while (address && (address < end)); + return 0; +} + +static int remap_area_pud(pud_t *pud, unsigned long address, + unsigned long end, unsigned long phys_addr, + pgprot_t prot) +{ + unsigned long next; + + phys_addr -= address; + + do { + pmd_t *pmd = pmd_alloc(&init_mm, pud, address); + if (!pmd) + return -ENOMEM; + next = (address + PUD_SIZE) & PUD_MASK; + if (remap_area_pmd(pmd, address, next, + phys_addr + address, prot)) + return -ENOMEM; + + address = next; + pud++; + } while (address && address < end); + + return 0; +} + +static int remap_area_pages(unsigned long address, unsigned long phys_addr, + size_t size, pgprot_t prot) +{ + unsigned long end = address + size; + unsigned long next; + pgd_t *pgd; + int err = 0; + + phys_addr -= address; + + pgd = pgd_offset_k(address); + flush_cache_all(); + BUG_ON(address >= end); + + spin_lock(&init_mm.page_table_lock); + do { + pud_t *pud = pud_alloc(&init_mm, pgd, address); + + err = -ENOMEM; + if (!pud) + break; + + next = (address + PGDIR_SIZE) & PGDIR_MASK; + if (next < address || next > end) + next = end; + err = remap_area_pud(pud, address, next, + phys_addr + address, prot); + if (err) + break; + + address = next; + pgd++; + } while (address && (address < end)); + + spin_unlock(&init_mm.page_table_lock); + flush_tlb_all(); + return err; +} + +/* + * Re-map an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access physical + * memory directly. + */ +void __iomem *__ioremap(unsigned long phys_addr, size_t size, + unsigned long flags) +{ + void *addr; + struct vm_struct *area; + unsigned long offset, last_addr; + pgprot_t prot; + + /* + * Check if we can simply use the P4 segment. This area is + * uncacheable, so if caching/buffering is requested, we can't + * use it. + */ + if ((phys_addr >= P4SEG) && (flags == 0)) + return (void __iomem *)phys_addr; + + /* Don't allow wraparound or zero size */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) + return NULL; + + /* + * XXX: When mapping regular RAM, we'd better make damn sure + * it's never used for anything else. But this is really the + * caller's responsibility... 
+ */ + if (PHYSADDR(P2SEGADDR(phys_addr)) == phys_addr) + return (void __iomem *)P2SEGADDR(phys_addr); + + /* Mappings have to be page-aligned */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(last_addr + 1) - phys_addr; + + prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY + | _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags); + + /* + * Ok, go for it.. + */ + area = get_vm_area(size, VM_IOREMAP); + if (!area) + return NULL; + area->phys_addr = phys_addr; + addr = area->addr; + if (remap_area_pages((unsigned long)addr, phys_addr, size, prot)) { + vunmap(addr); + return NULL; + } + + return (void __iomem *)(offset + (char *)addr); +} +EXPORT_SYMBOL(__ioremap); + +void __iounmap(void __iomem *addr) +{ + struct vm_struct *p; + + if ((unsigned long)addr >= P4SEG) + return; + + p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr)); + if (unlikely(!p)) { + printk (KERN_ERR "iounmap: bad address %p\n", addr); + return; + } + + kfree (p); +} +EXPORT_SYMBOL(__iounmap); diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c new file mode 100644 index 000000000000..5d0523bbe298 --- /dev/null +++ b/arch/avr32/mm/tlb.c @@ -0,0 +1,378 @@ +/* + * AVR32 TLB operations + * + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/mm.h> + +#include <asm/mmu_context.h> + +#define _TLBEHI_I 0x100 + +void show_dtlb_entry(unsigned int index) +{ + unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save, flags; + + local_irq_save(flags); + mmucr_save = sysreg_read(MMUCR); + tlbehi_save = sysreg_read(TLBEHI); + mmucr = mmucr_save & 0x13; + mmucr |= index << 14; + sysreg_write(MMUCR, mmucr); + + asm volatile("tlbr" : : : "memory"); + cpu_sync_pipeline(); + + tlbehi = sysreg_read(TLBEHI); + tlbelo = sysreg_read(TLBELO); + + printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n", + index, + (tlbehi & 0x200)?'1':'0', + (tlbelo & 0x100)?'1':'0', + (tlbehi & 0xff), + (tlbehi >> 12), (tlbelo >> 12), + (tlbelo >> 4) & 7, (tlbelo >> 2) & 3, + (tlbelo & 0x200)?'1':'0', + (tlbelo & 0x080)?'1':'0', + (tlbelo & 0x001)?'1':'0', + (tlbelo & 0x002)?'1':'0'); + + sysreg_write(MMUCR, mmucr_save); + sysreg_write(TLBEHI, tlbehi_save); + cpu_sync_pipeline(); + local_irq_restore(flags); +} + +void dump_dtlb(void) +{ + unsigned int i; + + printk("ID V G ASID VPN PFN AP SZ C B W D\n"); + for (i = 0; i < 32; i++) + show_dtlb_entry(i); +} + +static unsigned long last_mmucr; + +static inline void set_replacement_pointer(unsigned shift) +{ + unsigned long mmucr, mmucr_save; + + mmucr = mmucr_save = sysreg_read(MMUCR); + + /* Does this mapping already exist? 
*/ + __asm__ __volatile__( + " tlbs\n" + " mfsr %0, %1" + : "=r"(mmucr) + : "i"(SYSREG_MMUCR)); + + if (mmucr & SYSREG_BIT(MMUCR_N)) { + /* Not found -- pick a not-recently-accessed entry */ + unsigned long rp; + unsigned long tlbar = sysreg_read(TLBARLO); + + rp = 32 - fls(tlbar); + if (rp == 32) { + rp = 0; + sysreg_write(TLBARLO, -1L); + } + + mmucr &= 0x13; + mmucr |= (rp << shift); + + sysreg_write(MMUCR, mmucr); + } + + last_mmucr = mmucr; +} + +static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid) +{ + unsigned long vpn; + + vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid; + sysreg_write(TLBEHI, vpn); + cpu_sync_pipeline(); + + set_replacement_pointer(14); + + sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK); + + /* Let's go */ + asm volatile("nop\n\ttlbw" : : : "memory"); + cpu_sync_pipeline(); +} + +void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte) +{ + unsigned long flags; + + /* ptrace may call this routine */ + if (vma && current->active_mm != vma->vm_mm) + return; + + local_irq_save(flags); + update_dtlb(address, pte, get_asid()); + local_irq_restore(flags); +} + +void __flush_tlb_page(unsigned long asid, unsigned long page) +{ + unsigned long mmucr, tlbehi; + + page |= asid; + sysreg_write(TLBEHI, page); + cpu_sync_pipeline(); + asm volatile("tlbs"); + mmucr = sysreg_read(MMUCR); + + if (!(mmucr & SYSREG_BIT(MMUCR_N))) { + unsigned long tlbarlo; + unsigned long entry; + + /* Clear the "valid" bit */ + tlbehi = sysreg_read(TLBEHI); + tlbehi &= ~_TLBEHI_VALID; + sysreg_write(TLBEHI, tlbehi); + cpu_sync_pipeline(); + + /* mark the entry as "not accessed" */ + entry = (mmucr >> 14) & 0x3f; + tlbarlo = sysreg_read(TLBARLO); + tlbarlo |= (0x80000000 >> entry); + sysreg_write(TLBARLO, tlbarlo); + + /* update the entry with valid bit clear */ + asm volatile("tlbw"); + cpu_sync_pipeline(); + } +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) { + unsigned long flags, asid; + unsigned long saved_asid = MMU_NO_ASID; + + asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK; + page &= PAGE_MASK; + + local_irq_save(flags); + if (vma->vm_mm != current->mm) { + saved_asid = get_asid(); + set_asid(asid); + } + + __flush_tlb_page(asid, page); + + if (saved_asid != MMU_NO_ASID) + set_asid(saved_asid); + local_irq_restore(flags); + } +} + +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + + if (mm->context != NO_CONTEXT) { + unsigned long flags; + int size; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */ + mm->context = NO_CONTEXT; + if (mm == current->mm) + activate_context(mm); + } else { + unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK; + unsigned long saved_asid = MMU_NO_ASID; + + start &= PAGE_MASK; + end += (PAGE_SIZE - 1); + end &= PAGE_MASK; + if (mm != current->mm) { + saved_asid = get_asid(); + set_asid(asid); + } + + while (start < end) { + __flush_tlb_page(asid, start); + start += PAGE_SIZE; + } + if (saved_asid != MMU_NO_ASID) + set_asid(saved_asid); + } + local_irq_restore(flags); + } +} + +/* + * TODO: If this is only called for addresses > TASK_SIZE, we can probably + * skip the ASID stuff and just use the Global bit... 
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+	int size;
+
+	local_irq_save(flags);
+	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
+		flush_tlb_all();
+	} else {
+		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
+		unsigned long saved_asid = get_asid();
+
+		start &= PAGE_MASK;
+		end += (PAGE_SIZE - 1);
+		end &= PAGE_MASK;
+		set_asid(asid);
+		while (start < end) {
+			__flush_tlb_page(asid, start);
+			start += PAGE_SIZE;
+		}
+		set_asid(saved_asid);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Invalidate all TLB entries of this process by getting a new ASID */
+	if (mm->context != NO_CONTEXT) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		mm->context = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		local_irq_restore(flags);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
+	local_irq_restore(flags);
+}
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+
+static void *tlb_start(struct seq_file *tlb, loff_t *pos)
+{
+	static unsigned long tlb_index;
+
+	if (*pos >= 32)
+		return NULL;
+
+	tlb_index = 0;
+	return &tlb_index;
+}
+
+static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
+{
+	unsigned long *index = v;
+
+	if (*index >= 31)
+		return NULL;
+
+	++*pos;
+	++*index;
+	return index;
+}
+
+static void tlb_stop(struct seq_file *tlb, void *v)
+{
+
+}
+
+static int tlb_show(struct seq_file *tlb, void *v)
+{
+	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save, flags;
+	unsigned long *index = v;
+
+	if (*index == 0)
+		seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");
+
+	BUG_ON(*index >= 32);
+
+	local_irq_save(flags);
+	mmucr_save = sysreg_read(MMUCR);
+	tlbehi_save = sysreg_read(TLBEHI);
+	mmucr = mmucr_save & 0x13;
+	mmucr |= *index << 14;
+	sysreg_write(MMUCR, mmucr);
+
+	asm volatile("tlbr" : : : "memory");
+	cpu_sync_pipeline();
+
+	tlbehi = sysreg_read(TLBEHI);
+	tlbelo = sysreg_read(TLBELO);
+
+	sysreg_write(MMUCR, mmucr_save);
+	sysreg_write(TLBEHI, tlbehi_save);
+	cpu_sync_pipeline();
+	local_irq_restore(flags);
+
+	seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
+		   *index,
+		   (tlbehi & 0x200)?'1':'0',
+		   (tlbelo & 0x100)?'1':'0',
+		   (tlbehi & 0xff),
+		   (tlbehi >> 12), (tlbelo >> 12),
+		   (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
+		   (tlbelo & 0x200)?'1':'0',
+		   (tlbelo & 0x080)?'1':'0',
+		   (tlbelo & 0x001)?'1':'0',
+		   (tlbelo & 0x002)?'1':'0');
+
+	return 0;
+}
+
+static struct seq_operations tlb_ops = {
+	.start		= tlb_start,
+	.next		= tlb_next,
+	.stop		= tlb_stop,
+	.show		= tlb_show,
+};
+
+static int tlb_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &tlb_ops);
+}
+
+static struct file_operations proc_tlb_operations = {
+	.open		= tlb_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init proctlb_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = create_proc_entry("tlb", 0, NULL);
+	if (entry)
+		entry->proc_fops = &proc_tlb_operations;
+	return 0;
+}
+late_initcall(proctlb_init);
+#endif /* CONFIG_PROC_FS */
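
The sys_cacheflush() system call added in cache.c above is the only userspace-visible piece of this patch: because the AVR32 instruction and data caches are not coherent, a program that writes instructions at runtime (a JIT, a trampoline writer) must ask the kernel to clean the D-cache and invalidate the I-cache for the affected range. A minimal usage sketch follows; the syscall wrapper is illustrative only, and it assumes the patch's <asm/cachectl.h> is installed and that <asm/unistd.h> wires up __NR_cacheflush (neither header appears in this diff):

/*
 * Hypothetical userspace sketch, not part of this patch: make freshly
 * written code visible to the instruction cache.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>	/* CACHE_IFLUSH; assumed installed from this patch series */

static int cacheflush(int operation, void *addr, size_t len)
{
	/* __NR_cacheflush is an assumption here, taken from asm/unistd.h */
	return syscall(__NR_cacheflush, operation, addr, len);
}

int main(void)
{
	static unsigned char code[64];	/* stands in for a JIT output buffer */

	/* ... emit instructions into code[] through the D-cache ... */

	/* Ranges above CACHEFLUSH_MAX_LEN (1024 bytes) require CAP_SYS_ADMIN. */
	if (cacheflush(CACHE_IFLUSH, code, sizeof(code)) != 0) {
		perror("cacheflush");
		return 1;
	}
	return 0;
}

Note that a single CACHE_IFLUSH covers both directions: flush_icache_range() in cache.c cleans each D-cache line before invalidating the corresponding I-cache line, so the caller does not need a separate D-cache operation.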