author    Jiri Kosina <jkosina@suse.cz>  2011-09-15 15:08:05 +0200
committer Jiri Kosina <jkosina@suse.cz>  2011-09-15 15:08:18 +0200
commit    e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree      407361230bf6733f63d8e788e4b5e6566ee04818 /arch/parisc/include
parent    10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent    cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
Merge branch 'master' into for-next
Fast-forward merge with Linus to be able to merge patches based on a more recent version of the tree.
Diffstat (limited to 'arch/parisc/include')
-rw-r--r--  arch/parisc/include/asm/atomic.h       | 20
-rw-r--r--  arch/parisc/include/asm/bitops.h       | 11
-rw-r--r--  arch/parisc/include/asm/futex.h        | 66
-rw-r--r--  arch/parisc/include/asm/mmu_context.h  |  2
-rw-r--r--  arch/parisc/include/asm/ptrace.h       |  1
-rw-r--r--  arch/parisc/include/asm/unistd.h       |  3
6 files changed, 72 insertions(+), 31 deletions(-)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index f81955934aeb..4054b31e0fa9 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -197,15 +197,15 @@ static __inline__ int atomic_read(const atomic_t *v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -217,10 +217,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-(i),(v))))
@@ -259,10 +258,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
-static __inline__ int
+static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
- int ret;
+ s64 ret;
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -317,7 +316,7 @@ atomic64_read(const atomic64_t *v)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
@@ -336,12 +335,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-#else /* CONFIG_64BIT */
-
-#include <asm-generic/atomic64.h>
-
#endif /* !CONFIG_64BIT */
-#include <asm-generic/atomic-long.h>
#endif /* _ASM_PARISC_ATOMIC_H_ */
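
Note: this hunk tracks the tree-wide rework in which atomic_add_unless() becomes __atomic_add_unless() and returns the value it observed rather than a success flag; the boolean interface and atomic_inc_not_zero() are then supplied generically on top of it. A rough sketch of those generic wrappers (the real definitions live in include/linux/atomic.h), not parisc-specific code:

/* Sketch only: the add happened iff the observed old value differed from u. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
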
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 43c516fa17ff..8c9b631d2a78 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -8,7 +8,7 @@
#include <linux/compiler.h>
#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/*
* HP-PARISC specific bit operations
@@ -223,14 +223,7 @@ static __inline__ int fls(int x)
#ifdef __KERNEL__
#include <asm-generic/bitops/le.h>
-
-/* '3' is bits per byte */
-#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
-
-#define ext2_set_bit_atomic(l,nr,addr) \
- test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) \
- test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
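
Note: the two open-coded ext2 macros are replaced by the shared asm-generic header. Roughly what that header provides, going by the 3.1-era tree (the lock argument is unused because the *_le operations are already atomic):

#define ext2_set_bit_atomic(lock, nr, addr)	test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(lock, nr, addr)	test_and_clear_bit_le(nr, addr)
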
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 67a33cc27ef2..2388bdb32832 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -5,11 +5,14 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
+#include <asm/atomic.h>
#include <asm/errno.h>
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
+ unsigned long int flags;
+ u32 val;
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
@@ -18,21 +21,58 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
return -EFAULT;
pagefault_disable();
+ _atomic_spin_lock_irqsave(uaddr, flags);
+
switch (op) {
case FUTEX_OP_SET:
+ /* *(int *)UADDR2 = OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret)
+ ret = put_user(oparg, uaddr);
+ break;
case FUTEX_OP_ADD:
+ /* *(int *)UADDR2 += OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval + oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_OR:
+ /* *(int *)UADDR2 |= OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval | oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_ANDN:
+ /* *(int *)UADDR2 &= ~OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval & ~oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
case FUTEX_OP_XOR:
+ /* *(int *)UADDR2 ^= OPARG; */
+ ret = get_user(oldval, uaddr);
+ if (!ret) {
+ val = oldval ^ oparg;
+ ret = put_user(val, uaddr);
+ }
+ break;
default:
ret = -ENOSYS;
}
+ _atomic_spin_unlock_irqrestore(uaddr, flags);
+
pagefault_enable();
if (!ret) {
@@ -54,7 +94,9 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ int ret;
u32 val;
+ unsigned long flags;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@@ -65,12 +107,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
- if (get_user(val, uaddr))
- return -EFAULT;
- if (val == oldval && put_user(newval, uaddr))
- return -EFAULT;
+ /* HPPA has no cmpxchg in hardware and therefore the
+ * best we can do here is use an array of locks. The
+ * lock selected is based on a hash of the userspace
+ * address. This should scale to a couple of CPUs.
+ */
+
+ _atomic_spin_lock_irqsave(uaddr, flags);
+
+ ret = get_user(val, uaddr);
+
+ if (!ret && val == oldval)
+ ret = put_user(newval, uaddr);
+
*uval = val;
- return 0;
+
+ _atomic_spin_unlock_irqrestore(uaddr, flags);
+
+ return ret;
}
#endif /*__KERNEL__*/
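
Note: the futex hunks serialize userspace accesses with the same hashed-spinlock scheme parisc already uses for atomic_t, since the CPU has no hardware cmpxchg. A standalone userspace sketch of the idea, with illustrative names and sizes rather than the kernel's macros: hash the address down to one lock in a small array, take it, do the read-modify-write, release.

#include <pthread.h>
#include <stdint.h>

#define NLOCKS     16u
#define LINE_SHIFT 6u   /* hash by 64-byte line so neighbouring words share a lock */

static pthread_mutex_t locks[NLOCKS];

/* Call once before use. */
static void locks_init(void)
{
	for (unsigned int i = 0; i < NLOCKS; i++)
		pthread_mutex_init(&locks[i], NULL);
}

/* Pick the lock that guards this address. */
static pthread_mutex_t *lock_for(const void *addr)
{
	uintptr_t h = ((uintptr_t)addr >> LINE_SHIFT) & (NLOCKS - 1);
	return &locks[h];
}

/* Emulated compare-and-exchange: returns the value seen at *p;
 * the swap happened iff that value equals old. */
static uint32_t emulated_cmpxchg(uint32_t *p, uint32_t old, uint32_t newval)
{
	pthread_mutex_t *l = lock_for(p);
	uint32_t seen;

	pthread_mutex_lock(l);
	seen = *p;
	if (seen == old)
		*p = newval;
	pthread_mutex_unlock(l);
	return seen;
}

In the kernel version the lock is taken with local interrupts disabled and the user word is reached through get_user()/put_user(), so a fault produces -EFAULT; the sketch only captures the hashed lock and the protected read-modify-write.
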
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 354b2aca990e..59be25764433 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -3,7 +3,7 @@
#include <linux/mm.h>
#include <linux/sched.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm-generic/mm_hooks.h>
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 7f09533da771..250ae35aa062 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -56,7 +56,6 @@ struct pt_regs {
#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3)
#define user_stack_pointer(regs) ((regs)->gr[30])
unsigned long profile_pc(struct pt_regs *);
-extern void show_regs(struct pt_regs *);
#endif /* __KERNEL__ */
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 3392de3e7be0..d61de64f990a 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -821,8 +821,9 @@
#define __NR_open_by_handle_at (__NR_Linux + 326)
#define __NR_syncfs (__NR_Linux + 327)
#define __NR_setns (__NR_Linux + 328)
+#define __NR_sendmmsg (__NR_Linux + 329)
-#define __NR_Linux_syscalls (__NR_setns + 1)
+#define __NR_Linux_syscalls (__NR_sendmmsg + 1)
#define __IGNORE_select /* newselect */
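
Note: with __NR_sendmmsg wired up as __NR_Linux + 329 and the syscall count bumped, userspace can reach the new call by number even before a libc wrapper exists. A minimal sketch (hypothetical helper name, no error handling, mmsghdr setup elided):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/socket.h>
#include <unistd.h>

/* Issue the raw syscall by number; __NR_sendmmsg comes from the uapi headers. */
static int my_sendmmsg(int fd, struct mmsghdr *msgs, unsigned int vlen,
		       unsigned int flags)
{
	return syscall(__NR_sendmmsg, fd, msgs, vlen, flags);
}
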