author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 16:23:15 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 16:23:15 +0200
commit    faafcba3b5e15999cf75d5c5a513ac8e47e2545f (patch)
tree      47d58d1c00e650e820506c91eb9a41268756bdda /include
parent    13ead805c5a14b0e7ecd34f61404a5bfba655895 (diff)
parent    f10e00f4bf360c36edbe6bf18a6c75b171cbe012 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Optimized support for Intel "Cluster-on-Die" (CoD) topologies (Dave Hansen)

   - Various sched/idle refinements for better idle handling (Nicolas Pitre, Daniel Lezcano, Chuansheng Liu, Vincent Guittot)

   - sched/numa updates and optimizations (Rik van Riel)

   - sysbench speedup (Vincent Guittot)

   - capacity calculation cleanups/refactoring (Vincent Guittot)

   - Various cleanups to thread group iteration (Oleg Nesterov)

   - Double-rq-lock removal optimization and various refactorings (Kirill Tkhai)

   - various sched/deadline fixes

  ... and lots of other changes"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (72 commits)
  sched/dl: Use dl_bw_of() under rcu_read_lock_sched()
  sched/fair: Delete resched_cpu() from idle_balance()
  sched, time: Fix build error with 64 bit cputime_t on 32 bit systems
  sched: Improve sysbench performance by fixing spurious active migration
  sched/x86: Fix up typo in topology detection
  x86, sched: Add new topology for multi-NUMA-node CPUs
  sched/rt: Use resched_curr() in task_tick_rt()
  sched: Use rq->rd in sched_setaffinity() under RCU read lock
  sched: cleanup: Rename 'out_unlock' to 'out_free_new_mask'
  sched: Use dl_bw_of() under RCU read lock
  sched/fair: Remove duplicate code from can_migrate_task()
  sched, mips, ia64: Remove __ARCH_WANT_UNLOCKED_CTXSW
  sched: print_rq(): Don't use tasklist_lock
  sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()
  sched: Fix the task-group check in tg_has_rt_tasks()
  sched/fair: Leverage the idle state info when choosing the "idlest" cpu
  sched: Let the scheduler see CPU idle states
  sched/deadline: Fix inter- exclusive cpusets migrations
  sched/deadline: Clear dl_entity params when setscheduling to different class
  sched/numa: Kill the wrong/dead TASK_DEAD check in task_numa_fault()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/cputime_jiffies.h   2
-rw-r--r--  include/asm-generic/cputime_nsecs.h      2
-rw-r--r--  include/linux/sched.h                    6
-rw-r--r--  include/linux/seqlock.h                 19
-rw-r--r--  include/linux/smp.h                      2
-rw-r--r--  include/linux/wait.h                    16
6 files changed, 41 insertions, 6 deletions
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
index d5cb78f53986..fe386fc6e85e 100644
--- a/include/asm-generic/cputime_jiffies.h
+++ b/include/asm-generic/cputime_jiffies.h
@@ -3,6 +3,8 @@
typedef unsigned long __nocast cputime_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
#define cputime_to_scaled(__ct) (__ct)
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 4e817606c549..0419485891f2 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -21,6 +21,8 @@
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor)
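The new cmpxchg_cputime() macro resolves to cmpxchg() for the jiffies-backed (unsigned long) cputime_t and to cmpxchg64() for the nanoseconds-backed (u64) one, so callers can do lockless updates regardless of the underlying width. A minimal sketch of the kind of monotonic, lock-free advance this enables (cputime_advance() is an illustrative helper name, not part of this diff):

	/* Advance *counter to 'new' without taking a lock; never move it backwards. */
	static void cputime_advance(cputime_t *counter, cputime_t new)
	{
		cputime_t old;

		/* Retry until *counter is at least 'new' or someone else got there first. */
		while (new > (old = ACCESS_ONCE(*counter)))
			cmpxchg_cputime(counter, old, new);
	}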
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 05a8c00e8339..5e344bbe63ec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -57,6 +57,7 @@ struct sched_param {
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
+#include <linux/magic.h>
#include <asm/processor.h>
@@ -646,6 +647,7 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
+ seqlock_t stats_lock;
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
@@ -1024,6 +1026,7 @@ struct sched_domain_topology_level {
extern struct sched_domain_topology_level *sched_domain_topology;
extern void set_sched_topology(struct sched_domain_topology_level *tl);
+extern void wake_up_if_idle(int cpu);
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type) .name = #type
@@ -2647,6 +2650,8 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
}
#endif
+#define task_stack_end_corrupted(task) \
+ (*(end_of_stack(task)) != STACK_END_MAGIC)
static inline int object_is_on_stack(void *obj)
{
@@ -2669,6 +2674,7 @@ static inline unsigned long stack_not_used(struct task_struct *p)
return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
+extern void set_task_stack_end_magic(struct task_struct *tsk);
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
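set_task_stack_end_magic() plants the STACK_END_MAGIC canary at the lowest address of a task's kernel stack, and task_stack_end_corrupted() reports whether that canary has been overwritten. A hedged sketch of how the pair might be used (the surrounding function is illustrative, not part of this diff):

	/* Illustrative only: the canary is planted once at task creation with
	 * set_task_stack_end_magic(tsk); later it can be checked cheaply. */
	static void example_check_stack(struct task_struct *tsk)
	{
		if (task_stack_end_corrupted(tsk))
			panic("kernel stack of %s/%d overflowed\n",
			      tsk->comm, task_pid_nr(tsk));
	}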
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cc359636cfa3..f5df8f687b4d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -456,4 +456,23 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
spin_unlock_irqrestore(&sl->lock, flags);
}
+static inline unsigned long
+read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+{
+ unsigned long flags = 0;
+
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl_irqsave(lock, flags);
+
+ return flags;
+}
+
+static inline void
+done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+{
+ if (seq & 1)
+ read_sequnlock_excl_irqrestore(lock, flags);
+}
#endif /* __LINUX_SEQLOCK_H */
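read_seqbegin_or_lock_irqsave() and done_seqretry_irqrestore() extend the existing read_seqbegin_or_lock()/done_seqretry() pattern to irq-saving callers: the first pass is lockless (even sequence number), and a retry falls back to taking the lock with interrupts disabled (odd sequence number). A sketch of the intended loop, assuming a pointer 'sig' to a signal_struct with the stats_lock field added above:

	unsigned long flags;
	int seq, nextseq = 0;

	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		/* ... read fields protected by stats_lock, e.g. utime/stime ... */
		nextseq = 1;	/* on retry, take the lock for real */
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);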
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 34347f26be9b..93dff5fff524 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -100,6 +100,7 @@ int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
void kick_all_cpus_sync(void);
+void wake_up_all_idle_cpus(void);
/*
* Generic and arch helpers
@@ -148,6 +149,7 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
}
static inline void kick_all_cpus_sync(void) { }
+static inline void wake_up_all_idle_cpus(void) { }
#endif /* !SMP */
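wake_up_all_idle_cpus() (with a no-op stub for !SMP builds) kicks every idle CPU out of its idle loop so it re-evaluates its idle-state choice. A hedged sketch of a typical caller, e.g. after some global idle/latency constraint changes (the surrounding function is illustrative, not part of this diff):

	static void example_constraint_changed(void)
	{
		/* ... update the global constraint the idle loop consults ... */

		/* Force idle CPUs through their idle loop again so they pick
		 * an idle state consistent with the new constraint. */
		wake_up_all_idle_cpus();
	}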
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 80115bf88671..e4a8eb9312ea 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -281,9 +281,11 @@ do { \
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
- * The function returns 0 if the @timeout elapsed, or the remaining
- * jiffies (at least 1) if the @condition evaluated to %true before
- * the @timeout elapsed.
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
*/
#define wait_event_timeout(wq, condition, timeout) \
({ \
@@ -364,9 +366,11 @@ do { \
* change the result of the wait condition.
*
* Returns:
- * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
- * a signal, or the remaining jiffies (at least 1) if the @condition
- * evaluated to %true before the @timeout elapsed.
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a signal.
*/
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
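The reworded kernel-doc spells out the full return-value space of these macros. A sketch of a caller that distinguishes all of the cases (the wait-queue and condition names are illustrative):

	long left = wait_event_interruptible_timeout(my_wq, my_done, HZ);

	if (left == -ERESTARTSYS)
		return -ERESTARTSYS;	/* interrupted by a signal */
	if (left == 0)
		return -ETIMEDOUT;	/* condition still false after the timeout */
	/* left >= 1: condition became true; left == 1 may also mean it only
	 * became true as the timeout elapsed, otherwise 'left' jiffies remained. */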