Diffstat (limited to 'kernel')
-rw-r--r--  kernel/power/swap.c              5
-rw-r--r--  kernel/printk/printk.c           9
-rw-r--r--  kernel/sched/core.c              3
-rw-r--r--  kernel/signal.c                 17
-rw-r--r--  kernel/trace/ftrace.c            4
-rw-r--r--  kernel/trace/trace.h            64
-rw-r--r--  kernel/trace/trace_functions.c   2
7 files changed, 39 insertions(+), 65 deletions(-)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 0336ab14b408..77fef12bb45b 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -1528,9 +1528,10 @@ end:
int swsusp_check(void)
{
int error;
+ void *holder;
hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
- FMODE_READ, NULL);
+ FMODE_READ | FMODE_EXCL, &holder);
if (!IS_ERR(hib_resume_bdev)) {
set_blocksize(hib_resume_bdev, PAGE_SIZE);
clear_page(swsusp_header);
@@ -1556,7 +1557,7 @@ int swsusp_check(void)
put:
if (error)
- blkdev_put(hib_resume_bdev, FMODE_READ);
+ blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
else
pr_debug("PM: Image signature found, resuming\n");
} else {
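For reference, the pattern this hunk adopts is the exclusive-claim pairing of the FMODE_-based block layer API used here: the same FMODE_EXCL flag must appear on both the get and the put, and the holder pointer is an opaque cookie identifying the claimant. A minimal sketch of that pairing (illustrative, not the full swsusp_check() logic):

	void *holder;
	struct block_device *bdev;

	/* Claim the device exclusively; &holder is just a unique token. */
	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_EXCL, &holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* ... inspect the hibernation signature ... */

	/* Release with the same mode flags used for the claim. */
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);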
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 78a906303e6d..6613e3437b91 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2039,8 +2039,15 @@ static int __init console_setup(char *str)
char *s, *options, *brl_options = NULL;
int idx;
- if (str[0] == 0)
+ /*
+ * console="" or console=null have been suggested as a way to
+ * disable console output. Use ttynull, which was created for
+ * exactly this purpose.
+ */
+ if (str[0] == 0 || strcmp(str, "null") == 0) {
+ __add_preferred_console("ttynull", 0, NULL, NULL);
return 1;
+ }
if (_braille_console_setup(&str, &brl_options))
return 1;
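With this in place, either spelling on the boot command line attaches the ttynull driver as a preferred console instead of leaving the console list in an odd half-configured state, e.g. (illustrative command line):

	linux ... root=/dev/sda1 console=null

Output is then consumed by ttynull rather than a real device, which is the intended way to silence the console.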
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 473293dd40a3..633139647129 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1989,6 +1989,9 @@ out:
bool cpus_share_cache(int this_cpu, int that_cpu)
{
+ if (this_cpu == that_cpu)
+ return true;
+
return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */
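An illustrative call site (a hypothetical helper, not part of this diff) shows why the short-circuit matters: callers treat a true result as "cache-hot, stay local", and with the same-CPU check the trivial case no longer depends on per-CPU sd_llc_id being consistent for that CPU:

	/* Hypothetical example: prefer 'target' when it shares a
	 * last-level cache with the CPU the task last ran on.
	 * target == task_cpu(p) is now trivially true. */
	static bool wake_prefers_target(struct task_struct *p, int target)
	{
		return cpus_share_cache(task_cpu(p), target);
	}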
diff --git a/kernel/signal.c b/kernel/signal.c
index 6aa9ca45ebb1..a699055ebfe8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1824,16 +1824,6 @@ static inline int may_ptrace_stop(void)
}
/*
- * Return non-zero if there is a SIGKILL that should be waking us up.
- * Called with the siglock held.
- */
-static int sigkill_pending(struct task_struct *tsk)
-{
- return sigismember(&tsk->pending.signal, SIGKILL) ||
- sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
-}
-
-/*
* This must be called with current->sighand->siglock held.
*
* This should be the path for all ptrace stops.
@@ -1858,15 +1848,10 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
* calling arch_ptrace_stop, so we must release it now.
* To preserve proper semantics, we must do this before
* any signal bookkeeping like checking group_stop_count.
- * Meanwhile, a SIGKILL could come in before we retake the
- * siglock. That must prevent us from sleeping in TASK_TRACED.
- * So after regaining the lock, we must check for SIGKILL.
*/
spin_unlock_irq(&current->sighand->siglock);
arch_ptrace_stop(exit_code, info);
spin_lock_irq(&current->sighand->siglock);
- if (sigkill_pending(current))
- return;
}
/*
@@ -1875,6 +1860,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
* Also, transition to TRACED and updates to ->jobctl should be
* atomic with respect to siglock and should be done after the arch
* hook as siglock is released and regrabbed across it.
+ * schedule() will not sleep if there is a pending signal that
+ * can awaken the task.
*/
set_current_state(TASK_TRACED);
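The deleted sigkill_pending() check is redundant because the scheduler refuses to put a task to sleep while a fatal signal is pending: __schedule() tests the pending set against the sleep state before deactivating the task, and TASK_TRACED includes TASK_WAKEKILL. A paraphrased sketch of that core check (not part of this diff):

	/* Roughly what __schedule() consults before letting a task sleep. */
	static inline int signal_pending_state(long state, struct task_struct *p)
	{
		if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
			return 0;
		if (!signal_pending(p))
			return 0;

		/* A fatal signal wakes even a TASK_WAKEKILL sleeper. */
		return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
	}

So a SIGKILL arriving while the siglock is dropped simply keeps the task runnable; no explicit re-check is needed here.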
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e591da4449f0..c5484723abda 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5185,7 +5185,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op;
int bit;
- bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+ bit = trace_test_and_set_recursion(TRACE_LIST_START);
if (bit < 0)
return;
@@ -5246,7 +5246,7 @@ static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
{
int bit;
- bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+ bit = trace_test_and_set_recursion(TRACE_LIST_START);
if (bit < 0)
return;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c5c6daef8cdf..53e8ccbae818 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -431,23 +431,8 @@ struct tracer {
* When function tracing occurs, the following steps are made:
* If arch does not support a ftrace feature:
* call internal function (uses INTERNAL bits) which calls...
- * If callback is registered to the "global" list, the list
- * function is called and recursion checks the GLOBAL bits.
- * then this function calls...
* The function callback, which can use the FTRACE bits to
* check for recursion.
- *
- * Now if the arch does not suppport a feature, and it calls
- * the global list function which calls the ftrace callback
- * all three of these steps will do a recursion protection.
- * There's no reason to do one if the previous caller already
- * did. The recursion that we are protecting against will
- * go through the same steps again.
- *
- * To prevent the multiple recursion checks, if a recursion
- * bit is set that is higher than the MAX bit of the current
- * check, then we know that the check was made by the previous
- * caller, and we can skip the current check.
*/
enum {
TRACE_BUFFER_BIT,
@@ -460,12 +445,14 @@ enum {
TRACE_FTRACE_NMI_BIT,
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,
+ TRACE_FTRACE_TRANSITION_BIT,
- /* INTERNAL_BITs must be greater than FTRACE_BITs */
+ /* Internal use recursion bits */
TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
TRACE_INTERNAL_SIRQ_BIT,
+ TRACE_INTERNAL_TRANSITION_BIT,
TRACE_CONTROL_BIT,
@@ -478,12 +465,6 @@ enum {
* can only be modified by current, we can reuse trace_recursion.
*/
TRACE_IRQ_BIT,
-
- /*
- * When transitioning between context, the preempt_count() may
- * not be correct. Allow for a single recursion to cover this case.
- */
- TRACE_TRANSITION_BIT,
};
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
@@ -493,12 +474,18 @@ enum {
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
-#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_START TRACE_INTERNAL_BIT
-#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
-#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
+#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
+
+enum {
+ TRACE_CTX_NMI,
+ TRACE_CTX_IRQ,
+ TRACE_CTX_SOFTIRQ,
+ TRACE_CTX_NORMAL,
+ TRACE_CTX_TRANSITION,
+};
static __always_inline int trace_get_context_bit(void)
{
@@ -506,59 +493,48 @@ static __always_inline int trace_get_context_bit(void)
if (in_interrupt()) {
if (in_nmi())
- bit = 0;
+ bit = TRACE_CTX_NMI;
else if (in_irq())
- bit = 1;
+ bit = TRACE_CTX_IRQ;
else
- bit = 2;
+ bit = TRACE_CTX_SOFTIRQ;
} else
- bit = 3;
+ bit = TRACE_CTX_NORMAL;
return bit;
}
-static __always_inline int trace_test_and_set_recursion(int start, int max)
+static __always_inline int trace_test_and_set_recursion(int start)
{
unsigned int val = current->trace_recursion;
int bit;
- /* A previous recursion check was made */
- if ((val & TRACE_CONTEXT_MASK) > max)
- return 0;
-
bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) {
/*
* It could be that preempt_count has not been updated during
* a switch between contexts. Allow for a single recursion.
*/
- bit = TRACE_TRANSITION_BIT;
+ bit = start + TRACE_CTX_TRANSITION;
if (trace_recursion_test(bit))
return -1;
trace_recursion_set(bit);
barrier();
- return bit + 1;
+ return bit;
}
- /* Normal check passed, clear the transition to allow it again */
- trace_recursion_clear(TRACE_TRANSITION_BIT);
-
val |= 1 << bit;
current->trace_recursion = val;
barrier();
- return bit + 1;
+ return bit;
}
static __always_inline void trace_clear_recursion(int bit)
{
unsigned int val = current->trace_recursion;
- if (!bit)
- return;
-
- bit--;
bit = 1 << bit;
val &= ~bit;
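The net effect on callers is a simpler contract: trace_test_and_set_recursion() now takes only the start of the bit range (the context is derived internally), returns the bit itself rather than bit + 1, and signals recursion with a negative value. The canonical usage, mirrored from the ftrace call sites in this patch:

	int bit;

	bit = trace_test_and_set_recursion(TRACE_LIST_START);
	if (bit < 0)
		return;		/* already recursing in this context */

	/* ... run the callback(s) ... */

	trace_clear_recursion(bit);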
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index fcd41a166405..7adbfcf555fd 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -137,7 +137,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
pc = preempt_count();
preempt_disable_notrace();
- bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+ bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
if (bit < 0)
goto out;