author     Syed Rameez Mustafa <rameezmustafa@codeaurora.org>   2016-11-01 18:13:36 -0700
committer  Syed Rameez Mustafa <rameezmustafa@codeaurora.org>   2016-11-07 14:46:21 -0800
commit     576259be4abfbd8f7bee476b48c3ce2eee05cfb4 (patch)
tree       8946d71e37fd74204274b423ef18514fe41e1921 /kernel
parent     ecd8f7800fc4aa3b62cfb01773d8e99d5bee228d (diff)
sched/hmp: Use GFP_KERNEL for top task memory allocations
Task load structure allocations can consume a lot of memory as the number of tasks begins to increase. They can also exhaust the atomic memory pool quickly if a workload spawns many threads in a short amount of time, raising the likelihood of failed allocations. Move the call to init_new_task_load() outside atomic context and switch the allocations to GFP_KERNEL; there is no need for them to happen in atomic context.

Change-Id: I357772e10bf8958804d9cd0c78eda27139054b21
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
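As background, a minimal sketch of the trade-off this patch addresses (illustrative only, not part of the patch; the helper name is hypothetical). GFP_ATOMIC never sleeps and competes for a small reserve pool, so it fails quickly under memory pressure; GFP_KERNEL may block and trigger reclaim, making failure far less likely, but it is only legal in process context.

#include <linux/slab.h>

/*
 * Hypothetical helper illustrating the flag choice. With GFP_ATOMIC the
 * allocation cannot sleep and draws on limited emergency reserves; with
 * GFP_KERNEL it may sleep to reclaim memory, so the caller must not be
 * in an atomic section (e.g. it must run before get_cpu() disables
 * preemption).
 */
static u32 *alloc_window_array(int nr_cpus, bool atomic_ctx)
{
	return kcalloc(nr_cpus, sizeof(u32),
		       atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}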
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/core.c    18
-rw-r--r--   kernel/sched/hmp.c     18
-rw-r--r--   kernel/sched/sched.h    3
3 files changed, 24 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f3b1688b3be7..6e39de2836ea 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2269,17 +2269,7 @@ void sched_exit(struct task_struct *p)
 	reset_task_stats(p);
 	p->ravg.mark_start = wallclock;
 	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-
-	kfree(p->ravg.curr_window_cpu);
-	kfree(p->ravg.prev_window_cpu);
-
-	/*
-	 * update_task_ravg() can be called for exiting tasks. While the
-	 * function itself ensures correct behavior, the corresponding
-	 * trace event requires that these pointers be NULL.
-	 */
-	p->ravg.curr_window_cpu = NULL;
-	p->ravg.prev_window_cpu = NULL;
+	free_task_load_ptrs(p);
 
 	enqueue_task(rq, p, 0);
 	clear_ed_task(p, rq);
@@ -2384,10 +2374,12 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 int sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long flags;
-	int cpu = get_cpu();
+	int cpu;
 
-	__sched_fork(clone_flags, p);
 	init_new_task_load(p, false);
+	cpu = get_cpu();
+
+	__sched_fork(clone_flags, p);
 	/*
 	 * We mark the process as running here. This guarantees that
 	 * nobody will actually run it, and a signal or other external
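For readability, the resulting order in sched_fork(), reconstructed from the hunk above (a sketch, not verbatim kernel source): the GFP_KERNEL allocations in init_new_task_load() now run before get_cpu() disables preemption, i.e. before the atomic section begins.

int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long flags;
	int cpu;

	init_new_task_load(p, false);	/* may sleep: GFP_KERNEL is safe here */
	cpu = get_cpu();		/* disables preemption: atomic from here on */

	__sched_fork(clone_flags, p);
	/* ... remainder of sched_fork() unchanged ... */
}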
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index b30aa78d9823..30391aae0822 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1624,6 +1624,20 @@ unsigned int cpu_temp(int cpu)
 		return 0;
 }
 
+void free_task_load_ptrs(struct task_struct *p)
+{
+	kfree(p->ravg.curr_window_cpu);
+	kfree(p->ravg.prev_window_cpu);
+
+	/*
+	 * update_task_ravg() can be called for exiting tasks. While the
+	 * function itself ensures correct behavior, the corresponding
+	 * trace event requires that these pointers be NULL.
+	 */
+	p->ravg.curr_window_cpu = NULL;
+	p->ravg.prev_window_cpu = NULL;
+}
+
 void init_new_task_load(struct task_struct *p, bool idle_task)
 {
 	int i;
@@ -1636,8 +1650,8 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
 	memset(&p->ravg, 0, sizeof(struct ravg));
 	p->cpu_cycles = 0;
 
-	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ATOMIC);
-	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ATOMIC);
+	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
 
 	/* Don't have much choice. CPU frequency would be bogus */
 	BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
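Taken together, the two hmp.c changes give the per-task window arrays a clear lifecycle. A sketch, assuming the call sites shown in this patch are the only ones:

/*
 * Fork path, before get_cpu(): the arrays are allocated with
 * GFP_KERNEL; BUG_ON() still fires on failure because window-based
 * load tracking cannot proceed without them ("CPU frequency would
 * be bogus").
 */
init_new_task_load(p, false);

/*
 * Exit path: both arrays are freed and the pointers cleared to NULL
 * so that update_task_ravg()'s trace event sees well-defined values
 * for the exiting task.
 */
free_task_load_ptrs(p);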
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4289bf6cd642..de29c926379b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1079,6 +1079,7 @@ extern unsigned int __read_mostly sched_downmigrate;
 extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int __read_mostly sched_load_granule;
 
+extern void free_task_load_ptrs(struct task_struct *p);
 extern void init_new_task_load(struct task_struct *p, bool idle_task);
 extern u64 sched_ktime_clock(void);
 extern int got_boost_kick(void);
@@ -1527,6 +1528,8 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
 	return NULL;
 }
 
+static inline void free_task_load_ptrs(struct task_struct *p) { }
+
 static inline void init_new_task_load(struct task_struct *p, bool idle_task)
 {
 }
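The empty inline stub keeps callers such as sched_exit() building when the HMP scheduler is configured out. Assuming the surrounding declarations sit under the usual CONFIG_SCHED_HMP guard (the guard itself is outside this hunk), the pattern is:

#ifdef CONFIG_SCHED_HMP
extern void free_task_load_ptrs(struct task_struct *p);
#else
/* No arrays are ever allocated in this configuration, so freeing is a no-op. */
static inline void free_task_load_ptrs(struct task_struct *p) { }
#endif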