path: root/drivers/android
author    Martijn Coenen <maco@google.com>    2017-06-07 09:29:14 -0700
committer Martijn Coenen <maco@android.com>   2017-07-14 10:24:50 +0200
commit    adb685439e768b120a5bdae15f333967d1e239ed (patch)
tree      126a9e723c14fdf7e0429b04dc703f4c5b84aed4 /drivers/android
parent    d30e6a877a84916efb17836566aeecf2ce0d6e9f (diff)
ANDROID: binder: add min sched_policy to node.
This change adds flags to flat_binder_object.flags to allow indicating a minimum scheduling policy for the node. It also clarifies the valid value range for the priority bits in the flags.

Internally, we use the priority map that the kernel uses, e.g. [0..99] for real-time policies and [100..139] for the SCHED_NORMAL/SCHED_BATCH policies.

Bug: 34461621
Bug: 37293077
Change-Id: I12438deecb53df432da18c6fc77460768ae726d2
Signed-off-by: Martijn Coenen <maco@google.com>
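As context for the mapping described above, here is a minimal sketch of the user-to-kernel priority conversion. The series performs this conversion in a to_kernel_prio() helper introduced by a companion patch, so the helper names and exact form below are illustrative assumptions, not part of this diff.

#include <linux/sched.h>	/* SCHED_FIFO, SCHED_RR */
#include <linux/sched/prio.h>	/* MAX_USER_RT_PRIO, NICE_TO_PRIO */

/* Illustrative sketch of the kernel priority map described above. */
static bool is_rt_policy_sketch(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static int to_kernel_prio_sketch(int policy, int user_priority)
{
	if (is_rt_policy_sketch(policy))
		/* RT: user prio 1..99 -> kernel prio 98..0 (lower value = higher priority) */
		return MAX_USER_RT_PRIO - 1 - user_priority;
	/* SCHED_NORMAL/SCHED_BATCH: nice -20..19 -> kernel prio 100..139 */
	return NICE_TO_PRIO(user_priority);
}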
Diffstat (limited to 'drivers/android')
-rw-r--r--    drivers/android/binder.c    26
1 file changed, 21 insertions, 5 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 85a4651361b6..838f4dbc9af5 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -353,6 +353,8 @@ struct binder_error {
* and by @lock)
* @has_async_transaction: async transaction to node in progress
* (protected by @lock)
+ * @sched_policy: minimum scheduling policy for node
+ * (invariant after initialized)
* @accept_fds: file descriptor operations supported for node
* (invariant after initialized)
* @min_priority: minimum scheduling priority
@@ -392,6 +394,7 @@ struct binder_node {
/*
* invariant after initialization
*/
+ u8 sched_policy:2;
u8 accept_fds:1;
u8 min_priority;
};
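For context, a sketch of how userspace could fill in these bits when publishing a node. The FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT/MASK constants come from the companion uapi change, which is not part of this drivers/android diff, so treat the snippet as a sketch under that assumption.

#include <sched.h>			/* SCHED_FIFO */
#include <linux/android/binder.h>	/* struct flat_binder_object, FLAT_BINDER_FLAG_* */

/* Sketch: request a minimum scheduling policy/priority for a node. */
static void set_node_min_sched(struct flat_binder_object *fbo,
			       __u32 policy, __u32 user_priority)
{
	fbo->flags &= ~(FLAT_BINDER_FLAG_PRIORITY_MASK |
			FLAT_BINDER_FLAG_SCHED_POLICY_MASK);
	/* low 8 bits: user-space priority (RT prio or nice value) */
	fbo->flags |= user_priority & FLAT_BINDER_FLAG_PRIORITY_MASK;
	/* next 2 bits: minimum scheduling policy for the node */
	fbo->flags |= policy << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
}

For example, set_node_min_sched(&obj, SCHED_FIFO, 10) would ask the driver to run incoming transactions on this node at no less than SCHED_FIFO priority 10 (again, assuming the companion uapi constants).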
@@ -1208,6 +1211,7 @@ static struct binder_node *binder_init_node_ilocked(
binder_uintptr_t ptr = fp ? fp->binder : 0;
binder_uintptr_t cookie = fp ? fp->cookie : 0;
__u32 flags = fp ? fp->flags : 0;
+ s8 priority;
BUG_ON(!spin_is_locked(&proc->inner_lock));
while (*p) {
@@ -1239,8 +1243,10 @@ static struct binder_node *binder_init_node_ilocked(
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
- node->min_priority = NICE_TO_PRIO(
- flags & FLAT_BINDER_FLAG_PRIORITY_MASK);
+ priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
+ FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
+ node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
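A worked example of the decomposition above, assuming the SCHED_POLICY bits sit at bits 9..10 as in the companion uapi change:

/*
 * flags = (SCHED_FIFO << 9) | 10 = 0x20a
 *   priority           = 0x20a & 0x0ff         = 10
 *   node->sched_policy = (0x20a & 0x600) >> 9  = 1  (SCHED_FIFO)
 *   node->min_priority = to_kernel_prio(1, 10) = 100 - 1 - 10 = 89
 */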
@@ -4052,8 +4058,17 @@ retry:
tr.cookie = target_node->cookie;
t->saved_priority.sched_policy = current->policy;
t->saved_priority.prio = current->normal_prio;
- if (target_node->min_priority < t->priority.prio) {
- prio.sched_policy = SCHED_NORMAL;
+ if (target_node->min_priority < t->priority.prio ||
+ (target_node->min_priority == t->priority.prio &&
+ target_node->sched_policy == SCHED_FIFO)) {
+ /*
+ * In case the minimum priority on the node is
+ * higher (lower value), use that priority. If
+ * the priority is the same, but the node uses
+ * SCHED_FIFO, prefer SCHED_FIFO, since it can
+ * run unbounded, unlike SCHED_RR.
+ */
+ prio.sched_policy = target_node->sched_policy;
prio.prio = target_node->min_priority;
}
binder_set_priority(current, prio);
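To make the comparison concrete (kernel prio: lower value means higher priority), an example with illustrative values:

/*
 * Suppose the caller runs SCHED_NORMAL at nice 0, so t->priority.prio
 * is 120. A node with sched_policy == SCHED_RR and min_priority == 89
 * (RT prio 10) satisfies 89 < 120, so the transaction runs at
 * SCHED_RR/89. If caller and node were both at kernel prio 89 but the
 * node asked for SCHED_FIFO, the second clause still selects the
 * node's SCHED_FIFO, since SCHED_FIFO tasks are not time-sliced the
 * way SCHED_RR tasks are.
 */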
@@ -5092,8 +5107,9 @@ static void print_binder_node_nilocked(struct seq_file *m,
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
+ seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ node->sched_policy, node->min_priority,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count, node->tmp_refs);
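With this change, the node dump gains a "pri <policy>:<prio>" field. An illustrative output line (values made up) might look like:

  node 42: u00000000deadbeef c000000000badc0de pri 1:89 hs 1 hw 1 ls 0 lw 0 is 1 iw 1 tr 1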