Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/mutex.c           12
-rw-r--r--  kernel/locking/osq_lock.c        35
-rw-r--r--  kernel/locking/rwsem-xadd.c      35
-rw-r--r--  kernel/locking/spinlock_debug.c  14
4 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index f42f83a36506..bf5277ee11d3 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
+#include <linux/delay.h>
/*
* In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -378,6 +379,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* values at the cost of a few extra spins.
*/
cpu_relax_lowlatency();
+
+ /*
+ * On ARM systems we must slow down the waiter's repeated
+ * acquisition of spin_mlock and the atomics on the lock count,
+ * or we risk starving out a thread attempting to release the
+ * mutex. The mutex slowpath release must take the wait_lock
+ * spinlock, and that spinlock can share an exclusive monitor
+ * with the other waiter atomics in the mutex data structure,
+ * so we must take care to rate-limit the waiters.
+ */
+ udelay(1);
}
osq_unlock(&lock->osq);
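For readers who want to see the starvation mechanism outside the kernel, here is a minimal user-space sketch of the same idea. It is an illustration only, not the kernel code: the waiter() function and the SPIN_BACKOFF_US constant are assumptions made for the example, and the real patch simply adds udelay() inside the optimistic-spin loop shown above.

/*
 * Hedged user-space sketch (not kernel code): waiters that hammer a
 * shared atomic in a tight CAS loop can keep the cache line bouncing
 * and delay the thread trying to release it; a tiny per-iteration
 * pause, analogous to the udelay(1) added above, rate-limits them.
 * Build with: cc -O2 -pthread backoff_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NR_WAITERS      4
#define SPIN_BACKOFF_US 1        /* illustrative analogue of udelay(1) */

static atomic_int lock_word;     /* 0 = unlocked, 1 = locked */

static void *waiter(void *arg)
{
	int expected;

	(void)arg;
	for (;;) {
		expected = 0;
		/* Each failed CAS still pulls the cache line exclusive. */
		if (atomic_compare_exchange_weak(&lock_word, &expected, 1))
			break;
		/* Without this pause the releaser's store can be starved
		 * on some CPUs; with it, the line stays available. */
		usleep(SPIN_BACKOFF_US);
	}
	/* critical section would go here */
	atomic_store(&lock_word, 0);            /* release */
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WAITERS];

	for (int i = 0; i < NR_WAITERS; i++)
		pthread_create(&tid[i], NULL, waiter, NULL);
	for (int i = 0; i < NR_WAITERS; i++)
		pthread_join(tid[i], NULL);
	puts("all waiters acquired and released the lock");
	return 0;
}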
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 05a37857ab55..1e6a51cc25c4 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -1,6 +1,7 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>
+#include <linux/sched/rt.h>
/*
* An MCS like lock especially tailored for optimistic spinning for sleeping
@@ -85,6 +86,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
{
struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
struct optimistic_spin_node *prev, *next;
+ struct task_struct *task = current;
int curr = encode_cpu(smp_processor_id());
int old;
@@ -104,6 +106,32 @@ bool osq_lock(struct optimistic_spin_queue *lock)
prev = decode_cpu(old);
node->prev = prev;
+
+ /*
+ * We need to avoid reordering of the osq link-update sequence.
+ * Consider a case where the optimistic spin queue is CPU6->CPU2,
+ * with CPU6 holding the lock. If CPU0 now comes in to acquire
+ * osq_lock, it updates the tail to point at itself. If CPU2 then
+ * starts to unqueue itself from the optimistic spin queue, it
+ * finds the tail updated to CPU0 and sets CPU2's node->next to
+ * NULL in osq_wait_next(). If the following two stores in CPU0's
+ * path are reordered, prev->next (prev being CPU2's node) is
+ * updated to point at CPU0's node before CPU0's node->prev is set:
+ * node->prev = prev;
+ * WRITE_ONCE(prev->next, node);
+ *
+ * If CPU2's unqueue path then commits
+ * WRITE_ONCE(next->prev, prev);
+ * before CPU0's node->prev = prev has committed, CPU0's node->prev
+ * first points at CPU6's node, and when CPU0's own store finally
+ * commits it changes CPU0's prev back to CPU2's node. CPU2's
+ * node->next is NULL, so if CPU0 now enters the unqueue path of
+ * osq_lock() it spins forever, because the condition
+ * prev->next == node can never become true.
+ */
+ smp_mb();
+
WRITE_ONCE(prev->next, node);
/*
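The ordering requirement described in the comment above is the classic "initialise the node before publishing it" rule. Below is a hedged C11 sketch of that rule in user space; the struct and function names loosely mirror the kernel's but are illustrative assumptions, and atomic_thread_fence(memory_order_seq_cst) stands in for smp_mb().

#include <stdatomic.h>
#include <stddef.h>

/* Illustrative user-space analogue of the osq node linking above. */
struct spin_node {
	_Atomic(struct spin_node *) next;
	struct spin_node *prev;
};

/*
 * Initialise node->prev first, make that store globally visible, and
 * only then link the node into the queue via prev->next. A CPU that
 * unqueues and follows prev->next can then rely on node->prev being
 * set, which is what the smp_mb() in the hunk above enforces.
 */
static void queue_publish(struct spin_node *node, struct spin_node *prev)
{
	node->prev = prev;                           /* initialise first     */
	atomic_thread_fence(memory_order_seq_cst);   /* analogue of smp_mb() */
	atomic_store_explicit(&prev->next, node,     /* ...then publish      */
			      memory_order_relaxed);
}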
@@ -118,8 +146,13 @@ bool osq_lock(struct optimistic_spin_queue *lock)
while (!READ_ONCE(node->locked)) {
/*
* If we need to reschedule bail... so we can block.
+ * If a task spins on the mutex owner on one CPU after
+ * acquiring osq_lock while an RT task spins on another CPU
+ * to acquire osq_lock, the RT task will starve the owner
+ * from completing if the owner is to be scheduled on that
+ * same CPU, and the result is a live lock.
*/
- if (need_resched())
+ if (need_resched() || rt_task(task))
goto unqueue;
cpu_relax_lowlatency();
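As a rough user-space analogue of the rt_task() bail-out added above, a spinner can check whether it is itself running under a realtime scheduling policy and stop spinning so it cannot monopolise its CPU. The helper below is a hedged sketch; spinner_is_realtime() is an illustrative name, not a kernel or libc API.

#include <sched.h>
#include <stdbool.h>

/*
 * Hedged sketch: a realtime spinner never sees need_resched() set on
 * behalf of a lower-priority lock owner, so it must volunteer to stop
 * spinning (and block instead) or it can livelock the owner off the
 * CPU it needs.
 */
static bool spinner_is_realtime(void)
{
	int policy = sched_getscheduler(0);   /* 0 = the calling process */

	return policy == SCHED_FIFO || policy == SCHED_RR;
}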
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a4d4de05b2d1..75c950ede9c7 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -511,6 +511,41 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
unsigned long flags;
/*
+ * If a spinner is present, there is a chance that the load of
+ * rwsem_has_spinner() in rwsem_wake() can be reordered with
+ * respect to decrement of rwsem count in __up_write() leading
+ * to wakeup being missed.
+ *
+ * spinning writer                        up_write caller
+ * ---------------                        -----------------------
+ * [S] osq_unlock()                       [L] osq
+ *  spin_lock(wait_lock)
+ *  sem->count=0xFFFFFFFF00000001
+ *            +0xFFFFFFFF00000000
+ *  count=sem->count
+ *  MB
+ *                                         sem->count=0xFFFFFFFE00000001
+ *                                                   -0xFFFFFFFF00000001
+ *                                         RMB
+ *                                         spin_trylock(wait_lock)
+ *                                         return
+ * rwsem_try_write_lock(count)
+ * spin_unlock(wait_lock)
+ * schedule()
+ *
+ * Reordering of the atomic_long_sub_return_release() in
+ * __up_write() with the rwsem_has_spinner() load in rwsem_wake()
+ * can cause the wakeup from the up_write() context to be missed.
+ * In the spinning writer, sem->count and the local variable count
+ * are both 0xFFFFFFFE00000001, so rwsem_try_write_lock() fails to
+ * acquire the rwsem and the spinning writer goes to sleep in
+ * rwsem_down_write_failed().
+ *
+ * The smp_rmb() here is to make sure that the spinner state is
+ * consulted after sem->count has been updated in the up_write()
+ * context.
+ */
+ smp_rmb();
+
+ /*
* If a spinner is present, it is not necessary to do the wakeup.
* Try to do wakeup only if the trylock succeeds to minimize
* spinlock contention which may introduce too much delay in the
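The scenario in the comment added above has the shape of the classic store-buffering litmus test: each side performs a store (releasing the osq, or dropping the count) and then loads the other side's state, and without sufficient ordering both loads can observe stale values, so the wakeup is skipped. The C11 sketch below shows that shape with illustrative variable names; it uses full fences for clarity, whereas the patch itself relies on the atomic update of sem->count combined with the added smp_rmb().

#include <stdatomic.h>

/* Illustrative flags, not the kernel's data structures. */
static atomic_int spinner_gone;    /* "osq is now unlocked"            */
static atomic_int count_dropped;   /* "__up_write() dropped the count" */

/* Spinning-writer side: announce the spinner is gone, then check
 * whether the lock holder already released. */
static int spinning_writer_side(void)
{
	atomic_store_explicit(&spinner_gone, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&count_dropped, memory_order_relaxed);
}

/* up_write()/rwsem_wake() side: drop the count, then check whether a
 * spinner is still present before deciding to skip the wakeup. If the
 * load were allowed to move above the store, both sides could read 0
 * and the sleeping writer would never be woken. */
static int up_write_side(void)
{
	atomic_store_explicit(&count_dropped, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&spinner_gone, memory_order_relaxed);
}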
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0374a596cffa..d381f559e0ce 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -12,6 +12,8 @@
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/bug.h>
+#include <soc/qcom/watchdog.h>
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -64,6 +66,11 @@ static void spin_dump(raw_spinlock_t *lock, const char *msg)
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1,
lock->owner_cpu);
+#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
+ msm_trigger_wdog_bite();
+#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
+ BUG();
+#endif
dump_stack();
}
@@ -114,7 +121,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
__delay(1);
}
/* lockup suspected: */
- spin_dump(lock, "lockup suspected");
+ spin_bug(lock, "lockup suspected");
#ifdef CONFIG_SMP
trigger_all_cpu_backtrace();
#endif
@@ -167,6 +174,11 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
msg, raw_smp_processor_id(), current->comm,
task_pid_nr(current), lock);
+#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
+ msm_trigger_wdog_bite();
+#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
+ BUG();
+#endif
dump_stack();
}
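The spinlock_debug.c hunks make a suspected lockup or rwlock misuse escalate, at build time, to either an SoC watchdog bite (so the device resets and leaves a ramdump to inspect) or an immediate BUG(). Below is a hedged user-space sketch of that config-gated escalation pattern; the macro names echo the Kconfig symbols, but everything here is illustrative, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for msm_trigger_wdog_bite(); in the kernel this punches the
 * SoC watchdog so the device resets and preserves a ramdump. */
static void watchdog_bite(void)
{
	fprintf(stderr, "watchdog bite requested\n");
}

/* Build-time escalation choice, mirroring the two config options used
 * in the hunks above (plain macros here, not real Kconfig symbols). */
static void lock_debug_escalate(const char *msg)
{
	fprintf(stderr, "BUG: %s\n", msg);
#if defined(DEBUG_SPINLOCK_BITE_ON_BUG)
	watchdog_bite();                 /* reset via the watchdog       */
#elif defined(DEBUG_SPINLOCK_PANIC_ON_BUG)
	abort();                         /* user-space analogue of BUG() */
#else
	/* default: just report, like spin_dump()/dump_stack() */
	(void)watchdog_bite;
#endif
}

int main(void)
{
	lock_debug_escalate("lockup suspected");
	return 0;
}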