summaryrefslogtreecommitdiff
path: root/kernel/trace
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2013-02-04 19:05:43 +0100
committerOleg Nesterov <oleg@redhat.com>2013-02-08 18:28:08 +0100
commitb2fe8ba674e8acbb9e8e63510b802c6d054d88a3 (patch)
tree1bd1defbfe3f285dfa7c77f94bc5523ac4a82679 /kernel/trace
parentf42d24a1d20d2e72d1e5d48930f18b138dfad117 (diff)
uprobes/perf: Avoid uprobe_apply() whenever possible
uprobe_perf_open/close call the costly uprobe_apply() every time, we can avoid it if: - "nr_systemwide != 0" is not changed. - There is another process/thread with the same ->mm. - copy_process() does inherit_event(). dup_mmap() preserves the inserted breakpoints. - event->attr.enable_on_exec == T, we can rely on uprobe_mmap() called by exec/mmap paths. - tp_target is exiting. Only _close() checks PF_EXITING, I don't think TRACE_REG_PERF_OPEN can hit the dying task too often. Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace_uprobe.c42
1 files changed, 36 insertions, 6 deletions
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 2399f1416555..8dad2a92dee9 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -680,30 +680,60 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
return false;
}
+static inline bool
+uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+{
+ return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+}
+
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
+ bool done;
+
write_lock(&tu->filter.rwlock);
- if (event->hw.tp_target)
+ if (event->hw.tp_target) {
+ /*
+ * event->parent != NULL means copy_process(), we can avoid
+ * uprobe_apply(). current->mm must be probed and we can rely
+ * on dup_mmap() which preserves the already installed bp's.
+ *
+ * attr.enable_on_exec means that exec/mmap will install the
+ * breakpoints we need.
+ */
+ done = tu->filter.nr_systemwide ||
+ event->parent || event->attr.enable_on_exec ||
+ uprobe_filter_event(tu, event);
list_add(&event->hw.tp_list, &tu->filter.perf_events);
- else
+ } else {
+ done = tu->filter.nr_systemwide;
tu->filter.nr_systemwide++;
+ }
write_unlock(&tu->filter.rwlock);
- uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+ if (!done)
+ uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
return 0;
}
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
+ bool done;
+
write_lock(&tu->filter.rwlock);
- if (event->hw.tp_target)
+ if (event->hw.tp_target) {
list_del(&event->hw.tp_list);
- else
+ done = tu->filter.nr_systemwide ||
+ (event->hw.tp_target->flags & PF_EXITING) ||
+ uprobe_filter_event(tu, event);
+ } else {
tu->filter.nr_systemwide--;
+ done = tu->filter.nr_systemwide;
+ }
write_unlock(&tu->filter.rwlock);
- uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
+ if (!done)
+ uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
return 0;
}