author    Alex Shi <alex.shi@linaro.org>    2016-06-02 12:18:55 +0800
committer Alex Shi <alex.shi@linaro.org>    2016-06-02 12:18:55 +0800
commit    37aa27cffb68b92b8f42d1a9f6300ed758d07552 (patch)
tree      bb9cc689e044e6b127278379e181610d82def315 /include/asm-generic
parent    2ea80ad4205178dcea1152c0a603f23531c5412c (diff)
parent    351d2d4d3108a9a0f0e112dabbc36c2b1446e4f8 (diff)
Merge tag 'v4.4.12' into linux-linaro-lsk-v4.4
This is the 4.4.12 stable release
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/qspinlock.h  27
-rw-r--r--  include/asm-generic/siginfo.h    15
2 files changed, 26 insertions(+), 16 deletions(-)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index e2aadbc7151f..7d633f19e38a 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -27,7 +27,30 @@
*/
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
- return atomic_read(&lock->val);
+ /*
+ * queued_spin_lock_slowpath() can ACQUIRE the lock before
+ * issuing the unordered store that sets _Q_LOCKED_VAL.
+ *
+ * See both smp_cond_acquire() sites for more detail.
+ *
+ * This however means that in code like:
+ *
+ * spin_lock(A) spin_lock(B)
+ * spin_unlock_wait(B) spin_is_locked(A)
+ * do_something() do_something()
+ *
+ * Both CPUs can end up running do_something() because the store
+ * setting _Q_LOCKED_VAL will pass through the loads in
+ * spin_unlock_wait() and/or spin_is_locked().
+ *
+ * Avoid this by issuing a full memory barrier between the spin_lock()
+ * and the loads in spin_unlock_wait() and spin_is_locked().
+ *
+ * Note that regular mutual exclusion doesn't care about this
+ * delayed store.
+ */
+ smp_mb();
+ return atomic_read(&lock->val) & _Q_LOCKED_MASK;
}
/**
@@ -107,6 +130,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
*/
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
+ /* See queued_spin_is_locked() */
+ smp_mb();
while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
cpu_relax();
}
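The comment added in this hunk describes a store-buffering style reordering: each CPU's lock-acquiring store can be delayed past its subsequent load of the other lock, so both CPUs may see the other lock as free. As a rough illustration only (this is not kernel code), here is a minimal userspace C11 analogue; the thread functions, variable names, and the use of atomic_thread_fence(memory_order_seq_cst) as a stand-in for smp_mb() are all assumptions made for the sketch. With the fences in place, at least one thread must observe the other's lock as held, so both can never enter their critical section; remove them and both can.

/* Hypothetical userspace analogue of the pattern in the comment above;
 * every name here is invented for illustration. */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int lock_a, lock_b;	/* 0 = unlocked, 1 = locked */
static int a_entered, b_entered;

static void *cpu0(void *arg)
{
	/* spin_lock(A): the store that sets the locked bit */
	atomic_store_explicit(&lock_a, 1, memory_order_relaxed);
	/* smp_mb(): without this, the load below can be satisfied
	 * before the store above becomes visible to cpu1 */
	atomic_thread_fence(memory_order_seq_cst);
	/* spin_is_locked(B) */
	if (!atomic_load_explicit(&lock_b, memory_order_relaxed))
		a_entered = 1;		/* do_something() */
	return NULL;
}

static void *cpu1(void *arg)
{
	atomic_store_explicit(&lock_b, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	if (!atomic_load_explicit(&lock_a, memory_order_relaxed))
		b_entered = 1;
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	/* With the fences, a_entered and b_entered are never both 1. */
	printf("a_entered=%d b_entered=%d\n", a_entered, b_entered);
	return 0;
}

Build with something like cc -std=c11 -pthread sketch.c.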
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 3d1a3af5cf59..a2508a8f9a9c 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -17,21 +17,6 @@
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
-#ifndef HAVE_ARCH_COPY_SIGINFO
-
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-}
-
-#endif
-
extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
#endif
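The logic of the helper removed above is worth spelling out: when si_code is negative it copies the whole structure, since the queued siginfo may use any union member; otherwise it copies only the architecture preamble plus the largest commonly used union member (_sigchld), skipping the unused tail. Below is a toy standalone sketch of that partial-copy idea; the structure, field names, and sizes are invented for illustration and are not the real siginfo layout.

/* Toy illustration of the partial-copy idea in the removed helper. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_info {
	int code;			/* plays the role of si_code */
	union {
		char small[8];		/* most members are small...  */
		char sigchld[24];	/* ...this one is the largest
					 * in common use */
		char rare[64];		/* layout not known up front  */
	} fields;
};

static void toy_copy(struct toy_info *to, const struct toy_info *from)
{
	if (from->code < 0)
		/* Origin unknown: copy everything to be safe. */
		memcpy(to, from, sizeof(*to));
	else
		/* Copy the preamble plus the largest commonly used
		 * union member and skip the rest. */
		memcpy(to, from, offsetof(struct toy_info, fields) +
				 sizeof(from->fields.sigchld));
}

int main(void)
{
	struct toy_info src = { .code = 1 }, dst = { 0 };

	toy_copy(&dst, &src);
	printf("copied code=%d\n", dst.code);
	return 0;
}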