Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/bitops/builtin-__ffs.h  15
-rw-r--r--  include/asm-generic/bitops/builtin-__fls.h  15
-rw-r--r--  include/asm-generic/bitops/builtin-ffs.h     17
-rw-r--r--  include/asm-generic/bitops/builtin-fls.h     16
-rw-r--r--  include/asm-generic/mutex-xchg.h             11
-rw-r--r--  include/asm-generic/unistd.h                  4
6 files changed, 75 insertions, 3 deletions
diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h
new file mode 100644
index 000000000000..90041e3a41f0
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__ffs.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ return __builtin_ctzl(word);
+}
+
+#endif
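For reference, a minimal user-space sketch (not part of the patch) of what the new __ffs compiles down to; the test_ffs name and the sample values are purely illustrative:

#include <assert.h>

/* Mirrors the new header: __builtin_ctzl() counts trailing zero bits,
 * so for a non-zero word it yields the index of the lowest set bit. */
unsigned long test_ffs(unsigned long word)
{
        return __builtin_ctzl(word);
}

int main(void)
{
        assert(test_ffs(0x01UL) == 0);  /* bit 0 is the lowest set bit */
        assert(test_ffs(0x50UL) == 4);  /* bits 4 and 6 set -> returns 4 */
        /* test_ffs(0) is undefined, as the kerneldoc comment warns. */
        return 0;
}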
diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h
new file mode 100644
index 000000000000..0248f386635f
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__fls.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+ return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
+}
+
+#endif
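A hedged worked example of the arithmetic above, assuming a 64-bit long (the helper name and values are illustrative): the index of the highest set bit is 63 minus the number of leading zeros.

#include <assert.h>

/* Same expression as the header: (bits in a long - 1) - leading zeros. */
unsigned long test_fls_word(unsigned long word)
{
        return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}

int main(void)
{
        assert(test_fls_word(0x01UL) == 0);  /* only bit 0 set */
        assert(test_fls_word(0x90UL) == 7);  /* bits 4 and 7 set -> returns 7 */
        /* test_fls_word(0) is undefined: __builtin_clzl(0) is undefined. */
        return 0;
}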
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
new file mode 100644
index 000000000000..064825829e1c
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __always_inline int ffs(int x)
+{
+ return __builtin_ffs(x);
+}
+
+#endif
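Unlike __ffs above, ffs() follows the libc convention: positions are numbered from 1 and ffs(0) is defined to return 0. A small sketch (illustrative helper name and values) makes the difference visible:

#include <assert.h>

/* Same body as the header; __builtin_ffs already handles x == 0. */
int test_ffs_int(int x)
{
        return __builtin_ffs(x);
}

int main(void)
{
        assert(test_ffs_int(0)    == 0);  /* defined, unlike __ffs(0) */
        assert(test_ffs_int(1)    == 1);  /* bit 0 set -> position 1 */
        assert(test_ffs_int(0x80) == 8);  /* bit 7 set -> position 8 */
        return 0;
}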
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
new file mode 100644
index 000000000000..eda652d0ac7f
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __always_inline int fls(int x)
+{
+ return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
+}
+
+#endif
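The explicit zero check exists because __builtin_clz(0) is undefined; the kerneldoc examples can be checked directly with a sketch like this (illustrative helper name, values taken from the comment above):

#include <assert.h>

/* Same expression as the header: for a 32-bit int, fls(x) = 32 - clz(x),
 * with a guard because __builtin_clz(0) is undefined. */
int test_fls_int(int x)
{
        return x ? (int)sizeof(x) * 8 - __builtin_clz(x) : 0;
}

int main(void)
{
        assert(test_fls_int(0) == 0);
        assert(test_fls_int(1) == 1);
        assert(test_fls_int((int)0x80000000) == 32);  /* matches the kerneldoc example */
        return 0;
}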
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 580a6d35c700..c04e0db8a2d6 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -26,7 +26,13 @@ static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
        if (unlikely(atomic_xchg(count, 0) != 1))
-               fail_fn(count);
+               /*
+                * We failed to acquire the lock, so mark it contended
+                * to ensure that any waiting tasks are woken up by the
+                * unlock slow path.
+                */
+               if (likely(atomic_xchg(count, -1) != 1))
+                       fail_fn(count);
 }
 
 /**
@@ -43,7 +49,8 @@ static inline int
 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
        if (unlikely(atomic_xchg(count, 0) != 1))
-               return fail_fn(count);
+               if (likely(atomic_xchg(count, -1) != 1))
+                       return fail_fn(count);
        return 0;
 }
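To make the intent of the second xchg concrete, here is a hedged user-space analogue built on C11 atomics. The fastpath_lock/slowpath names are stand-ins, not kernel code; the counter convention (1 = unlocked, 0 = locked, -1 = locked and contended) mirrors the mutex-xchg scheme described in the comment added above:

#include <stdatomic.h>

/* Counter convention: 1 = unlocked, 0 = locked, -1 = locked and contended. */
void fastpath_lock(atomic_int *count, void (*slowpath)(atomic_int *))
{
        if (atomic_exchange(count, 0) != 1)
                /*
                 * The fastpath lost: force the counter to -1 so that the
                 * eventual unlock takes its slow path and wakes waiters.
                 * If this second exchange observes 1, the holder released
                 * the lock in the window between the two exchanges, so we
                 * now own it (already marked contended) and can skip the
                 * slowpath call.
                 */
                if (atomic_exchange(count, -1) != 1)
                        slowpath(count);
}

As the new comment in the patch says, leaving the counter at 0 after a failed fastpath would let the unlock fastpath skip waking sleepers; re-marking it -1 before entering the slow path keeps the contended state visible.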
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 991ef01cd77e..3748ec92dcbc 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -691,9 +691,11 @@ __SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
 #define __NR_process_vm_writev 271
 __SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
           compat_sys_process_vm_writev)
+#define __NR_kcmp 272
+__SYSCALL(__NR_kcmp, sys_kcmp)
 
 #undef __NR_syscalls
-#define __NR_syscalls 272
+#define __NR_syscalls 273
 
 /*
  * All syscalls below here should go away really,
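The only functional change here is wiring up syscall number 272 for kcmp in the generic ABI. A hedged user-space sketch of invoking it raw (glibc provides no wrapper; KCMP_FILE is comparison type 0 from linux/kcmp.h, the fd pair below is only an example, and other architectures use different syscall numbers):

#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef __NR_kcmp
#define __NR_kcmp 272           /* the generic-ABI number added above */
#endif

#define KCMP_FILE 0             /* compare two file descriptors */

int main(void)
{
        pid_t pid = getpid();
        /* Returns 0 if fd 1 and fd 2 of this process refer to the same
         * open file description, a positive ordering value if not, or
         * -1 on error (for example on a kernel built without kcmp). */
        long ret = syscall(__NR_kcmp, pid, pid, KCMP_FILE, 1, 2);

        printf("kcmp(stdout, stderr) = %ld\n", ret);
        return 0;
}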