author     H. Peter Anvin <hpa@zytor.com>    2008-09-04 08:08:42 -0700
committer  H. Peter Anvin <hpa@zytor.com>    2008-09-04 08:08:42 -0700
commit     7203781c98ad9147564d327de6f6513ad8fc0f4e
tree       5c29a2a04a626bf08a0d56fd8a0068b3c92ad284
parent     671eef85a3e885dff4ce210d8774ad50a91d5967
parent     af2e1f276ff08f17192411ea3b71c13a758dfe12
Merge branch 'x86/cpu' into x86/core
Conflicts:
	arch/x86/kernel/cpu/feature_names.c
	include/asm-x86/cpufeature.h
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  |  86
-rw-r--r--  arch/arm/include/asm/kexec.h        |   2
-rw-r--r--  arch/arm/include/asm/memory.h       |  22
-rw-r--r--  arch/arm/include/asm/mtd-xip.h      |   2
-rw-r--r--  arch/arm/include/asm/processor.h    |   4
-rw-r--r--  arch/arm/include/asm/tlbflush.h     |   7
-rw-r--r--  arch/arm/include/asm/unistd.h       |   6
7 files changed, 92 insertions(+), 37 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 45329fca1b64..7b95d2058395 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -3,11 +3,48 @@
#ifdef __KERNEL__
-#include <linux/mm.h> /* need struct page */
-
+#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <asm-generic/dma-coherent.h>
+#include <asm/memory.h>
+
+/*
+ * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
+ * used internally by the DMA-mapping API to provide DMA addresses. They
+ * must not be used by drivers.
+ */
+#ifndef __arch_page_to_dma
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+ return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+ return (void *)__bus_to_virt(addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+ return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
+}
+#else
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+ return __arch_page_to_dma(dev, page);
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+ return __arch_dma_to_virt(dev, addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+ return __arch_virt_to_dma(dev, addr);
+}
+#endif
/*
* DMA-consistent mapping functions. These allocate/free a region of
@@ -169,7 +206,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
if (!arch_is_coherent())
dma_cache_maint(cpu_addr, size, dir);
- return virt_to_dma(dev, (unsigned long)cpu_addr);
+ return virt_to_dma(dev, cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
@@ -195,7 +232,7 @@ dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
- return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+ return dma_map_single(dev, page_address(page) + offset, size, dir);
}
/**
@@ -241,7 +278,7 @@ static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
- dma_unmap_single(dev, handle, size, (int)dir);
+ dma_unmap_single(dev, handle, size, dir);
}
/**
@@ -314,11 +351,12 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
/**
- * dma_sync_single_for_cpu
+ * dma_sync_single_range_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @offset: offset of region to start sync
+ * @size: size of region to sync
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
*
* Make physical memory consistent for a single streaming mode DMA
* translation after a transfer.
@@ -332,25 +370,41 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
*/
#ifndef CONFIG_DMABOUNCE
static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
if (!arch_is_coherent())
- dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+ dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
if (!arch_is_coherent())
- dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+ dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
#else
-extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
-extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
+extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
+extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
#endif
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+}
+
/**
* dma_sync_sg_for_cpu
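
[Aside, not part of the patch: a minimal driver-side sketch of how the range-based sync calls added above are typically paired with a streaming mapping. The function and variable names here are hypothetical; the existing dma_sync_single_for_cpu()/dma_sync_single_for_device() calls remain available as the offset-0 wrappers shown in the hunk above.]

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical RX-completion handler: sync only the region the
 * device actually wrote, then hand it back for the next transfer. */
static void example_rx_complete(struct device *dev, dma_addr_t handle,
				void *buf, unsigned long pkt_off,
				size_t pkt_len)
{
	/* Make the freshly written region visible to the CPU. */
	dma_sync_single_range_for_cpu(dev, handle, pkt_off, pkt_len,
				      DMA_FROM_DEVICE);

	/* ... parse the packet at buf + pkt_off ... */

	/* Return ownership of the region to the device. */
	dma_sync_single_range_for_device(dev, handle, pkt_off, pkt_len,
					 DMA_FROM_DEVICE);
}
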
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index c8986bb99ed5..df15a0dc228e 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -10,7 +10,7 @@
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
-#define KEXEC_CONTROL_CODE_SIZE 4096
+#define KEXEC_CONTROL_PAGE_SIZE 4096
#define KEXEC_ARCH KEXEC_ARCH_ARM
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 1e070a2b561a..bf7c737c9226 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -150,6 +150,14 @@
#endif
/*
+ * Amount of memory reserved for the vmalloc() area, and minimum
+ * address for vmalloc mappings.
+ */
+extern unsigned long vmalloc_reserve;
+
+#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
+
+/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
@@ -306,20 +314,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/*
- * Optional device DMA address remapping. Do _not_ use directly!
- * We should really eliminate virt_to_bus() here - it's deprecated.
- */
-#ifndef __arch_page_to_dma
-#define page_to_dma(dev, page) ((dma_addr_t)__virt_to_bus((unsigned long)page_address(page)))
-#define dma_to_virt(dev, addr) ((void *)__bus_to_virt(addr))
-#define virt_to_dma(dev, addr) ((dma_addr_t)__virt_to_bus((unsigned long)(addr)))
-#else
-#define page_to_dma(dev, page) (__arch_page_to_dma(dev, page))
-#define dma_to_virt(dev, addr) (__arch_dma_to_virt(dev, addr))
-#define virt_to_dma(dev, addr) (__arch_virt_to_dma(dev, addr))
-#endif
-
-/*
* Optional coherency support. Currently used only by selected
* Intel XSC3-based systems.
*/
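
[Aside, not part of the patch: a sketch of the kind of boot-time check the new VMALLOC_MIN limit enables; the helper name and its caller are hypothetical.]

#include <linux/init.h>
#include <asm/memory.h>

/* Hypothetical check: a RAM bank whose kernel direct mapping would
 * start at or above VMALLOC_MIN would collide with the vmalloc area
 * and should be skipped. */
static int __init bank_usable(unsigned long phys_start)
{
	return (void *)__va(phys_start) < VMALLOC_MIN;
}
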
diff --git a/arch/arm/include/asm/mtd-xip.h b/arch/arm/include/asm/mtd-xip.h
index 4225372a26f3..d8fbe2d9b8b9 100644
--- a/arch/arm/include/asm/mtd-xip.h
+++ b/arch/arm/include/asm/mtd-xip.h
@@ -10,8 +10,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * $Id: xip.h,v 1.2 2004/12/01 15:49:10 nico Exp $
*/
#ifndef __ARM_MTD_XIP_H__
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index b01d5e7e3d5a..517a4d6ffc74 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -112,9 +112,9 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
static inline void prefetch(const void *ptr)
{
__asm__ __volatile__(
- "pld\t%0"
+ "pld\t%a0"
:
- : "o" (*(char *)ptr)
+ : "p" (ptr)
: "cc");
}
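
[Aside, not part of the patch: prefetch() is the hook used when walking pointer-chasing structures; a hypothetical illustration of a caller, with names that are illustrative only.]

#include <linux/prefetch.h>

struct item {
	struct item *next;
	int value;
};

/* Hypothetical list walk: issue the PLD for the next node while the
 * current one is processed.  PLD never faults, so prefetching the
 * NULL next pointer at the end of the list is harmless. */
static int sum_items(const struct item *head)
{
	const struct item *p;
	int sum = 0;

	for (p = head; p; p = p->next) {
		prefetch(p->next);
		sum += p->value;
	}
	return sum;
}
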
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 0d0d40f1b599..b543a054a17e 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -54,6 +54,7 @@
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
* fr - Feroceon (v4wbi with non-outer-cacheable page table walks)
* v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
+ * v7wbi - identical to v6wbi
*/
#undef _TLB
#undef MULTI_TLB
@@ -266,14 +267,16 @@ extern struct cpu_tlb_fns cpu_tlb;
v4wbi_possible_flags | \
fr_possible_flags | \
v4wb_possible_flags | \
- v6wbi_possible_flags)
+ v6wbi_possible_flags | \
+ v7wbi_possible_flags)
#define always_tlb_flags (v3_always_flags & \
v4_always_flags & \
v4wbi_always_flags & \
fr_always_flags & \
v4wb_always_flags & \
- v6wbi_always_flags)
+ v6wbi_always_flags & \
+ v7wbi_always_flags)
#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index f95fbb2fcb5f..010618487cf1 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -381,6 +381,12 @@
#define __NR_fallocate (__NR_SYSCALL_BASE+352)
#define __NR_timerfd_settime (__NR_SYSCALL_BASE+353)
#define __NR_timerfd_gettime (__NR_SYSCALL_BASE+354)
+#define __NR_signalfd4 (__NR_SYSCALL_BASE+355)
+#define __NR_eventfd2 (__NR_SYSCALL_BASE+356)
+#define __NR_epoll_create1 (__NR_SYSCALL_BASE+357)
+#define __NR_dup3 (__NR_SYSCALL_BASE+358)
+#define __NR_pipe2 (__NR_SYSCALL_BASE+359)
+#define __NR_inotify_init1 (__NR_SYSCALL_BASE+360)
/*
* The following SWIs are ARM private.
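
[Aside, not part of the patch: a hypothetical userspace illustration of reaching one of the newly numbered calls directly via syscall(2), assuming an EABI build where __NR_SYSCALL_BASE is 0 and kernel headers that already define __NR_pipe2; glibc wrappers for these calls arrived separately.]

#define _GNU_SOURCE
#include <unistd.h>
#include <fcntl.h>
#include <sys/syscall.h>

int main(void)
{
	int fds[2];

	/* __NR_pipe2 is __NR_SYSCALL_BASE + 359 in this patch. */
	if (syscall(__NR_pipe2, fds, O_CLOEXEC) < 0)
		return 1;
	return 0;
}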