Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Makefile                                                                2
-rw-r--r--  kernel/trace/trace_event_perf.c (renamed from kernel/trace/trace_event_profile.c)   50
-rw-r--r--  kernel/trace/trace_events.c                                                          2
-rw-r--r--  kernel/trace/trace_kprobe.c                                                         29
-rw-r--r--  kernel/trace/trace_syscalls.c                                                       72
5 files changed, 80 insertions, 75 deletions
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d00c6fe23f54..78edc6490038 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
 obj-$(CONFIG_EVENT_TRACING) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
-obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_perf.c
index c1cc3ab633de..81f691eb3a30 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,32 +1,36 @@
 /*
- * trace event based perf counter profiling
+ * trace event based perf event profiling/tracing
  *
  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
- *
+ * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include "trace.h"
 
+DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
+EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
+
+EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
+typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
 
 /* Count the events in use (per event id, not per instance) */
-static int	total_profile_count;
+static int	total_ref_count;
 
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+static int perf_trace_event_enable(struct ftrace_event_call *event)
 {
 	char *buf;
 	int ret = -ENOMEM;
 
-	if (event->profile_count++ > 0)
+	if (event->perf_refcount++ > 0)
 		return 0;
 
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
@@ -40,35 +44,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 		rcu_assign_pointer(perf_trace_buf_nmi, buf);
 	}
 
-	ret = event->profile_enable(event);
+	ret = event->perf_event_enable(event);
 	if (!ret) {
-		total_profile_count++;
+		total_ref_count++;
 		return 0;
 	}
 
 fail_buf_nmi:
-	if (!total_profile_count) {
+	if (!total_ref_count) {
 		free_percpu(perf_trace_buf_nmi);
 		free_percpu(perf_trace_buf);
 		perf_trace_buf_nmi = NULL;
 		perf_trace_buf = NULL;
 	}
 fail_buf:
-	event->profile_count--;
+	event->perf_refcount--;
 
 	return ret;
 }
 
-int ftrace_profile_enable(int event_id)
+int perf_trace_enable(int event_id)
 {
 	struct ftrace_event_call *event;
 	int ret = -EINVAL;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id && event->profile_enable &&
+		if (event->id == event_id && event->perf_event_enable &&
 		    try_module_get(event->mod)) {
-			ret = ftrace_profile_enable_event(event);
+			ret = perf_trace_event_enable(event);
 			break;
 		}
 	}
@@ -77,16 +81,16 @@ int ftrace_profile_enable(int event_id)
 	return ret;
 }
 
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+static void perf_trace_event_disable(struct ftrace_event_call *event)
 {
 	char *buf, *nmi_buf;
 
-	if (--event->profile_count > 0)
+	if (--event->perf_refcount > 0)
 		return;
 
-	event->profile_disable(event);
+	event->perf_event_disable(event);
 
-	if (!--total_profile_count) {
+	if (!--total_ref_count) {
 		buf = perf_trace_buf;
 		rcu_assign_pointer(perf_trace_buf, NULL);
@@ -104,14 +108,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 	}
 }
 
-void ftrace_profile_disable(int event_id)
+void perf_trace_disable(int event_id)
 {
 	struct ftrace_event_call *event;
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
 		if (event->id == event_id) {
-			ftrace_profile_disable_event(event);
+			perf_trace_event_disable(event);
 			module_put(event->mod);
 			break;
 		}
@@ -119,8 +123,8 @@ void ftrace_profile_disable(int event_id)
 	mutex_unlock(&event_mutex);
 }
 
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
-					int *rctxp, unsigned long *irq_flags)
+__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
+				       int *rctxp, unsigned long *irq_flags)
 {
 	struct trace_entry *entry;
 	char *trace_buf, *raw_data;
@@ -161,4 +165,4 @@ err_recursion:
 	local_irq_restore(*irq_flags);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
+EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3f972ad98d04..beab8bf2f310 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 		trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
-	if (call->id && call->profile_enable)
+	if (call->id && call->perf_event_enable)
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 505c92273b1a..1251e367bae9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_perf_func(struct kprobe *kp,
 					 struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 					    struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
 		return;
 
@@ -1271,10 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
-	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
+	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
+			       irq_flags, regs);
 }
 
-static int probe_profile_enable(struct ftrace_event_call *call)
+static int probe_perf_enable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1286,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
 		return enable_kprobe(&tp->rp.kp);
 }
 
-static void probe_profile_disable(struct ftrace_event_call *call)
+static void probe_perf_disable(struct ftrace_event_call *call)
 {
 	struct trace_probe *tp = (struct trace_probe *)call->data;
 
@@ -1311,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 		kprobe_trace_func(kp, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_profile_func(kp, regs);
+		kprobe_perf_func(kp, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1325,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 		kretprobe_trace_func(ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_profile_func(ri, regs);
+		kretprobe_perf_func(ri, regs);
 #endif
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
@@ -1358,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
 	call->unregfunc = probe_event_disable;
 
 #ifdef CONFIG_PERF_EVENTS
-	call->profile_enable = probe_profile_enable;
-	call->profile_disable = probe_profile_disable;
+	call->perf_event_enable = probe_perf_enable;
+	call->perf_event_disable = probe_perf_disable;
 #endif
 	call->data = tp;
 	ret = trace_add_event_call(call);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index cba47d7935cc..33c2a5b769dc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
 
 #ifdef CONFIG_PERF_EVENTS
 
-static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
-static int sys_prof_refcount_enter;
-static int sys_prof_refcount_exit;
+static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
+static int sys_perf_refcount_enter;
+static int sys_perf_refcount_exit;
 
-static void prof_syscall_enter(struct pt_regs *regs, long id)
+static void perf_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		      "profile buffer not large enough"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		      "perf buffer not large enough"))
 		return;
 
-	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
 				sys_data->enter_event->id, &rctx, &flags);
 	if (!rec)
 		return;
 
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysenter_enable(struct ftrace_event_call *call)
+int perf_sysenter_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_enter)
-		ret = register_trace_sys_enter(prof_syscall_enter);
+	if (!sys_perf_refcount_enter)
+		ret = register_trace_sys_enter(perf_syscall_enter);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall entry trace point");
 	} else {
-		set_bit(num, enabled_prof_enter_syscalls);
-		sys_prof_refcount_enter++;
+		set_bit(num, enabled_perf_enter_syscalls);
+		sys_perf_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysenter_disable(struct ftrace_event_call *call)
+void perf_sysenter_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_enter--;
-	clear_bit(num, enabled_prof_enter_syscalls);
-	if (!sys_prof_refcount_enter)
-		unregister_trace_sys_enter(prof_syscall_enter);
+	sys_perf_refcount_enter--;
+	clear_bit(num, enabled_perf_enter_syscalls);
+	if (!sys_perf_refcount_enter)
+		unregister_trace_sys_enter(perf_syscall_enter);
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static void prof_syscall_exit(struct pt_regs *regs, long ret)
+static void perf_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
-	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	 * Impossible, but be paranoid with the future
 	 * How to put this check outside runtime?
 	 */
-	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
-		"exit event has grown above profile buffer size"))
+	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
+		"exit event has grown above perf buffer size"))
 		return;
 
-	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
 				sys_data->exit_event->id, &rctx, &flags);
 	if (!rec)
 		return;
 
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
 
-	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
+	perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
 }
 
-int prof_sysexit_enable(struct ftrace_event_call *call)
+int perf_sysexit_enable(struct ftrace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	if (!sys_prof_refcount_exit)
-		ret = register_trace_sys_exit(prof_syscall_exit);
+	if (!sys_perf_refcount_exit)
+		ret = register_trace_sys_exit(perf_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
 				"syscall exit trace point");
 	} else {
-		set_bit(num, enabled_prof_exit_syscalls);
-		sys_prof_refcount_exit++;
+		set_bit(num, enabled_perf_exit_syscalls);
+		sys_perf_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
 	return ret;
 }
 
-void prof_sysexit_disable(struct ftrace_event_call *call)
+void perf_sysexit_disable(struct ftrace_event_call *call)
 {
 	int num;
 
 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
 
 	mutex_lock(&syscall_trace_lock);
-	sys_prof_refcount_exit--;
-	clear_bit(num, enabled_prof_exit_syscalls);
-	if (!sys_prof_refcount_exit)
-		unregister_trace_sys_exit(prof_syscall_exit);
+	sys_perf_refcount_exit--;
+	clear_bit(num, enabled_perf_exit_syscalls);
+	if (!sys_perf_refcount_exit)
+		unregister_trace_sys_exit(perf_syscall_exit);
 	mutex_unlock(&syscall_trace_lock);
 }
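Every submit site touched above follows the same prepare/fill/submit pattern that the renamed helpers expose. For reference, a minimal sketch of a handler written against the post-rename API (the event, my_trace_entry, and my_perf_func are hypothetical illustrations, not part of this commit; addr and count are passed as 0 and 1 as in the syscall handlers):

#include <linux/kernel.h>
#include <linux/ftrace_event.h>
#include <linux/perf_event.h>

/* Hypothetical record layout: real events place their own fields after
 * the common struct trace_entry header. */
struct my_trace_entry {
	struct trace_entry	ent;
	unsigned long		value;
};

static void my_perf_func(struct ftrace_event_call *call,
			 unsigned long value, struct pt_regs *regs)
{
	struct my_trace_entry *entry;
	unsigned long irq_flags;
	int rctx;
	/* Round the record up to a u64 boundary, less the u32 size header,
	 * exactly as the kprobe and syscall handlers in this diff do. */
	int size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64))
		   - sizeof(u32);

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return;

	/* Reserve space in the shared per-cpu perf trace buffer and fill
	 * the common header with type = call->id. */
	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
	if (!entry)
		return;

	entry->value = value;

	/* Hand the record to perf; the trailing pt_regs argument is the
	 * extra parameter this commit threads through every submit site. */
	perf_trace_buf_submit(entry, size, rctx, 0, 1, irq_flags, regs);
}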
