From e1d8aa9f1dd655a3534b22fcfbecb70cdb125766 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Mon, 12 Jan 2009 23:15:46 +0100
Subject: tracing: add a new workqueue tracer

Impact: new tracer

The workqueue tracer provides some statistical information about each
cpu workqueue thread, such as the number of works inserted and executed
since their creation. It can help to evaluate the amount of work each of
them has to perform. For example, it can help a developer decide whether
to choose a per-cpu workqueue instead of a single-threaded one.

It only traces statistical information for now, but it will probably
provide event tracing later too.

Such a tracer could also help, once improved, with the development of
rt-priority-sorted workqueues.

To get a snapshot of the workqueue state at any time, just do:

   cat /debugfs/tracing/trace_stat/workqueues

For example:

   1      125        125       reiserfs/1
   1        0          0       scsi_tgtd/1
   1        0          0       aio/1
   1        0          0       ata/1
   1      114        114       kblockd/1
   1        0          0       kintegrityd/1
   1     2147       2147       events/1
   0        0          0       kpsmoused
   0      105        105       reiserfs/0
   0        0          0       scsi_tgtd/0
   0        0          0       aio/0
   0        0          0       ata_aux
   0        0          0       ata/0
   0        0          0       cqueue
   0        0          0       kacpi_notify
   0        0          0       kacpid
   0      149        149       kblockd/0
   0        0          0       kintegrityd/0
   0     1000       1000       khelper
   0     2270       2270       events/0

Changes in V2:

_ Drop the static array based on NR_CPUS and dynamically allocate the
  stat array with num_possible_cpus() and other cpu mask facilities....
_ Trace workqueue insertion at a bit lower level (insert_work instead of
  queue_work) to handle even the workqueue barriers.

Signed-off-by: Frederic Weisbecker
Signed-off-by: Steven Rostedt
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 287 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 287 insertions(+)
 create mode 100644 kernel/trace/trace_workqueue.c

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
new file mode 100644
index 000000000000..f8118d39ca9b
--- /dev/null
+++ b/kernel/trace/trace_workqueue.c
@@ -0,0 +1,287 @@
+/*
+ * Workqueue statistical tracer.
+ *
+ * Copyright (C) 2008 Frederic Weisbecker
+ *
+ */
+
+
+#include <trace/workqueue.h>
+#include <linux/list.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+
+/* A cpu workqueue thread */
+struct cpu_workqueue_stats {
+        struct list_head list;
+/* Useful to know if we print the cpu headers */
+        bool first_entry;
+        int cpu;
+        pid_t pid;
+/* Can be inserted from interrupt or user context, need to be atomic */
+        atomic_t inserted;
+/*
+ * Don't need to be atomic, works are serialized in a single workqueue thread
+ * on a single CPU.
+ */
+        unsigned int executed;
+};
+
+/* List of workqueue threads on one cpu */
+struct workqueue_global_stats {
+        struct list_head list;
+        spinlock_t lock;
+};
+
+/* Don't need a global lock because allocated before the workqueues, and
+ * never freed.
+ */
+static struct workqueue_global_stats *all_workqueue_stat;
+
+/* Insertion of a work */
+static void
+probe_workqueue_insertion(struct task_struct *wq_thread,
+                          struct work_struct *work)
+{
+        int cpu = cpumask_first(&wq_thread->cpus_allowed);
+        struct cpu_workqueue_stats *node, *next;
+        unsigned long flags;
+
+        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+                                 list) {
+                if (node->pid == wq_thread->pid) {
+                        atomic_inc(&node->inserted);
+                        goto found;
+                }
+        }
+        pr_debug("trace_workqueue: entry not found\n");
+found:
+        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+}
+
+/* Execution of a work */
+static void
+probe_workqueue_execution(struct task_struct *wq_thread,
+                          struct work_struct *work)
+{
+        int cpu = cpumask_first(&wq_thread->cpus_allowed);
+        struct cpu_workqueue_stats *node, *next;
+        unsigned long flags;
+
+        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+                                 list) {
+                if (node->pid == wq_thread->pid) {
+                        node->executed++;
+                        goto found;
+                }
+        }
+        pr_debug("trace_workqueue: entry not found\n");
+found:
+        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+}
+
+/* Creation of a cpu workqueue thread */
+static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
+{
+        struct cpu_workqueue_stats *cws;
+        unsigned long flags;
+
+        WARN_ON(cpu < 0 || cpu >= num_possible_cpus());
+
+        /* Workqueues are sometimes created in atomic context */
+        cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
+        if (!cws) {
+                pr_warning("trace_workqueue: not enough memory\n");
+                return;
+        }
+        tracing_record_cmdline(wq_thread);
+
+        INIT_LIST_HEAD(&cws->list);
+        cws->cpu = cpu;
+
+        cws->pid = wq_thread->pid;
+
+        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        if (list_empty(&all_workqueue_stat[cpu].list))
+                cws->first_entry = true;
+        list_add_tail(&cws->list, &all_workqueue_stat[cpu].list);
+        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+}
+
+/* Destruction of a cpu workqueue thread */
+static void probe_workqueue_destruction(struct task_struct *wq_thread)
+{
+        /* Workqueue only execute on one cpu */
+        int cpu = cpumask_first(&wq_thread->cpus_allowed);
+        struct cpu_workqueue_stats *node, *next;
+        unsigned long flags;
+
+        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+                                 list) {
+                if (node->pid == wq_thread->pid) {
+                        list_del(&node->list);
+                        kfree(node);
+                        goto found;
+                }
+        }
+
+        pr_debug("trace_workqueue: don't find workqueue to destroy\n");
+found:
+        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+
+}
+
+static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
+{
+        unsigned long flags;
+        struct cpu_workqueue_stats *ret = NULL;
+
+
+        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+
+        if (!list_empty(&all_workqueue_stat[cpu].list))
+                ret = list_entry(all_workqueue_stat[cpu].list.next,
+                                 struct cpu_workqueue_stats, list);
+
+        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+
+        return ret;
+}
+
+static void *workqueue_stat_start(void)
+{
+        int cpu;
+        void *ret = NULL;
+
+        for_each_possible_cpu(cpu) {
+                ret = workqueue_stat_start_cpu(cpu);
+                if (ret)
+                        return ret;
+        }
+        return NULL;
+}
+
+static void *workqueue_stat_next(void *prev, int idx)
+{
+        struct cpu_workqueue_stats *prev_cws = prev;
+        int cpu = prev_cws->cpu;
+        unsigned long flags;
+        void *ret = NULL;
+
+        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) {
+                spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+                for (++cpu ; cpu < num_possible_cpus(); cpu++) {
+                        ret = workqueue_stat_start_cpu(cpu);
+                        if (ret)
+                                return ret;
+                }
+                return NULL;
+        }
+        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+
+        return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
+                          list);
+}
+
+static int workqueue_stat_show(struct seq_file *s, void *p)
+{
+        struct cpu_workqueue_stats *cws = p;
+        unsigned long flags;
+        int cpu = cws->cpu;
+
+        seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
+                   atomic_read(&cws->inserted),
+                   cws->executed,
+                   trace_find_cmdline(cws->pid));
+
+        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        if (&cws->list == all_workqueue_stat[cpu].list.next)
+                seq_printf(s, "\n");
+        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+
+        return 0;
+}
+
+static int workqueue_stat_headers(struct seq_file *s)
+{
+        seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
+        seq_printf(s, "# | | | |\n\n");
+        return 0;
+}
+
+struct tracer_stat workqueue_stats __read_mostly = {
+        .name = "workqueues",
+        .stat_start = workqueue_stat_start,
+        .stat_next = workqueue_stat_next,
+        .stat_show = workqueue_stat_show,
+        .stat_headers = workqueue_stat_headers
+};
+
+
+int __init stat_workqueue_init(void)
+{
+        if (register_stat_tracer(&workqueue_stats)) {
+                pr_warning("Unable to register workqueue stat tracer\n");
+                return 1;
+        }
+
+        return 0;
+}
+fs_initcall(stat_workqueue_init);
+
+/*
+ * Workqueues are created very early, just after pre-smp initcalls.
+ * So we must register our tracepoints at this stage.
+ */
+int __init trace_workqueue_early_init(void)
+{
+        int ret, cpu;
+
+        ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
+        if (ret)
+                goto out;
+
+        ret = register_trace_workqueue_execution(probe_workqueue_execution);
+        if (ret)
+                goto no_insertion;
+
+        ret = register_trace_workqueue_creation(probe_workqueue_creation);
+        if (ret)
+                goto no_execution;
+
+        ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
+        if (ret)
+                goto no_creation;
+
+        all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats)
+                                     * num_possible_cpus(), GFP_KERNEL);
+
+        if (!all_workqueue_stat) {
+                pr_warning("trace_workqueue: not enough memory\n");
+                goto no_creation;
+        }
+
+        for_each_possible_cpu(cpu) {
+                spin_lock_init(&all_workqueue_stat[cpu].lock);
+                INIT_LIST_HEAD(&all_workqueue_stat[cpu].list);
+        }
+
+        return 0;
+
+no_creation:
+        unregister_trace_workqueue_creation(probe_workqueue_creation);
+no_execution:
+        unregister_trace_workqueue_execution(probe_workqueue_execution);
+no_insertion:
+        unregister_trace_workqueue_insertion(probe_workqueue_insertion);
+out:
+        pr_warning("trace_workqueue: unable to trace workqueues\n");
+
+        return 1;
+}
+early_initcall(trace_workqueue_early_init);

From 3690b5e6fd9daa030039ae9bda69044228bd476d Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Fri, 16 Jan 2009 16:32:25 +0800
Subject: trace_workqueue: use percpu data for workqueue stat

Impact: use percpu data instead of a global structure

Use:

   static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);

instead of allocating a global structure.

percpu data also works well on NUMA.
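A minimal sketch of the before/after pattern (the demo_* names are
placeholders for illustration, not part of this patch):

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct demo_cpu_stats {
        struct list_head list;
        spinlock_t lock;
};

/* Before: one kmalloc()ed array indexed by cpu, which could fail at
 * init time.  After: static per-cpu data, with no allocation or error
 * path, laid out per cpu by the percpu allocator. */
static DEFINE_PER_CPU(struct demo_cpu_stats, demo_cpu_stats);
#define demo_cpu_stat(cpu) (&per_cpu(demo_cpu_stats, cpu))

static int __init demo_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                spin_lock_init(&demo_cpu_stat(cpu)->lock);
                INIT_LIST_HEAD(&demo_cpu_stat(cpu)->list);
        }
        return 0;
}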
Signed-off-by: Lai Jiangshan
Signed-off-by: Steven Rostedt
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 64 +++++++++++++++++++-----------------------
 1 file changed, 29 insertions(+), 35 deletions(-)

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index f8118d39ca9b..4664990fe9c5 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -8,6 +8,7 @@
 #include <trace/workqueue.h>
 #include <linux/list.h>
+#include <linux/percpu.h>
 #include "trace_stat.h"
 #include "trace.h"
@@ -37,7 +38,8 @@ struct workqueue_global_stats {
 /* Don't need a global lock because allocated before the workqueues, and
  * never freed.
  */
-static struct workqueue_global_stats *all_workqueue_stat;
+static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
+#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
 
 /* Insertion of a work */
 static void
@@ -48,8 +50,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                  list) {
                 if (node->pid == wq_thread->pid) {
                         atomic_inc(&node->inserted);
@@ -58,7 +60,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
         }
         pr_debug("trace_workqueue: entry not found\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Execution of a work */
@@ -70,8 +72,8 @@ probe_workqueue_execution(struct task_struct *wq_thread,
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                  list) {
                 if (node->pid == wq_thread->pid) {
                         node->executed++;
@@ -80,7 +82,7 @@ probe_workqueue_execution(struct task_struct *wq_thread,
         }
         pr_debug("trace_workqueue: entry not found\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Creation of a cpu workqueue thread */
@@ -104,11 +106,11 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 
         cws->pid = wq_thread->pid;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (list_empty(&all_workqueue_stat[cpu].list))
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (list_empty(&workqueue_cpu_stat(cpu)->list))
                 cws->first_entry = true;
-        list_add_tail(&cws->list, &all_workqueue_stat[cpu].list);
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Destruction of a cpu workqueue thread */
@@ -119,8 +121,8 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                  list) {
                 if (node->pid == wq_thread->pid) {
                         list_del(&node->list);
@@ -131,7 +133,7 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
 
         pr_debug("trace_workqueue: don't find workqueue to destroy\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 }
 
@@ -141,13 +143,13 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
         struct cpu_workqueue_stats *ret = NULL;
 
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 
-        if (!list_empty(&all_workqueue_stat[cpu].list))
-                ret = list_entry(all_workqueue_stat[cpu].list.next,
+        if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+                ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
                                  struct cpu_workqueue_stats, list);
 
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return ret;
 }
@@ -172,9 +174,9 @@ static void *workqueue_stat_next(void *prev, int idx)
         unsigned long flags;
         void *ret = NULL;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) {
-                spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
+                spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
                 for (++cpu ; cpu < num_possible_cpus(); cpu++) {
                         ret = workqueue_stat_start_cpu(cpu);
                         if (ret)
@@ -182,7 +184,7 @@ static void *workqueue_stat_next(void *prev, int idx)
                 }
                 return NULL;
         }
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
                           list);
@@ -199,10 +201,10 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
                    cws->executed,
                    trace_find_cmdline(cws->pid));
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (&cws->list == all_workqueue_stat[cpu].list.next)
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
                 seq_printf(s, "\n");
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return 0;
 }
@@ -258,17 +260,9 @@ int __init trace_workqueue_early_init(void)
         if (ret)
                 goto no_creation;
 
-        all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats)
-                                     * num_possible_cpus(), GFP_KERNEL);
-
-        if (!all_workqueue_stat) {
-                pr_warning("trace_workqueue: not enough memory\n");
-                goto no_creation;
-        }
-
         for_each_possible_cpu(cpu) {
-                spin_lock_init(&all_workqueue_stat[cpu].lock);
-                INIT_LIST_HEAD(&all_workqueue_stat[cpu].list);
+                spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+                INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
         }
 
         return 0;

From c3ffc7a40b7e94b094efe1c8ab4e24370a782b65 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Mon, 9 Mar 2009 18:15:34 +0900
Subject: tracing: Don't use tracing_record_cmdline() in workqueue tracer

Impact: improve workqueue tracer output

Currently, /sys/kernel/debug/tracing/trace_stat/workqueues can display
wrong and strange thread names.

Why? ftrace has the tracing_record_cmdline()/trace_find_cmdline()
convenience functions, which implement a task->comm string cache. This
avoids unnecessary memcpy overhead, and the workqueue tracer uses it.
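A simplified sketch of such a pid -> comm cache (illustrative only, not
the actual ftrace implementation; SLOTS and the slot mapping are made up):

#include <linux/sched.h>
#include <linux/string.h>

#define SLOTS 128   /* the real cache is also a fixed-size table */

static char saved_comm[SLOTS][TASK_COMM_LEN];

/* Called when a task's comm is recorded by the tracer. */
static void record_cmdline(struct task_struct *tsk)
{
        /* A later task whose pid maps to the same slot silently
         * overwrites the entry recorded for an earlier one. */
        memcpy(saved_comm[tsk->pid % SLOTS], tsk->comm, TASK_COMM_LEN);
}

/* Called at output time to turn a saved pid back into a name. */
static const char *find_cmdline(pid_t pid)
{
        /* Nothing verifies that the slot still belongs to 'pid', so a
         * very old pid can resolve to some newer task's comm. */
        return saved_comm[pid % SLOTS];
}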
However, in general, a trace statistics feature shouldn't use
tracing_record_cmdline(), because trace statistics can display very old
processes, and the comm cache can return the wrong string once more
recent processes have overwritten the cache.

Fortunately, the workqueue tracer guarantees that the displayed
processes are alive. Thus we can look up the comm string from the PID
at display time.

Before:

% cat workqueues
# CPU  INSERTED  EXECUTED   NAME
# |      |         |          |
  7    431913    431913      kondemand/7
  7         0         0      tail
  7        21        21      git
  7         0         0      ls
  7         9         9      cat
  7    832632    832632      unix_chkpwd
  7    236292    236292      ls

Note: tail, git, ls, cat and unix_chkpwd are obviously not workqueue
threads.

After:

% cat workqueues
# CPU  INSERTED  EXECUTED   NAME
# |      |         |          |
  7       510       510      kondemand/7
  7         0         0      kmpathd/7
  7        15        15      ata/7
  7         0         0      aio/7
  7        11        11      kblockd/7
  7      1063      1063      work_on_cpu/7
  7       167       167      events/7

Signed-off-by: KOSAKI Motohiro
Cc: Lai Jiangshan
Cc: Steven Rostedt
Cc: Frederic Weisbecker
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 4664990fe9c5..46c8dc896bd3 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -99,8 +99,6 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
                 pr_warning("trace_workqueue: not enough memory\n");
                 return;
         }
-        tracing_record_cmdline(wq_thread);
-
         INIT_LIST_HEAD(&cws->list);
         cws->cpu = cpu;
 
@@ -195,11 +193,12 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
         struct cpu_workqueue_stats *cws = p;
         unsigned long flags;
         int cpu = cws->cpu;
+        struct task_struct *tsk = find_task_by_vpid(cws->pid);
 
         seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
                    atomic_read(&cws->inserted),
                    cws->executed,
-                   trace_find_cmdline(cws->pid));
+                   tsk ? tsk->comm : "<...>");

From bbcd3063597a3824357cd83c501c2a2aa21ef37b Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Tue, 10 Mar 2009 10:49:53 +0900
Subject: tracing: Don't assume possible cpu list have continuous numbers

The "for (++cpu ; cpu < num_possible_cpus(); cpu++)" statement assumes
that possible cpus have contiguous numbers - but that is a wrong
assumption. Instead, cpumask_next() should be used.
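For illustration, consider a hypothetical box where cpu_possible_mask is
{0, 1, 3}: num_possible_cpus() is 3, so the old loop both visits the
non-possible cpu 2 and never reaches cpu 3. A sketch of the mask-based
walk the patch below switches to (next_possible_cpu() is an illustrative
helper, not part of the patch):

#include <linux/cpumask.h>

/* Next possible cpu strictly after 'cpu'; returns >= nr_cpu_ids when
 * the mask is exhausted, mirroring cpumask_next() semantics. */
static unsigned int next_possible_cpu(int cpu)
{
        return cpumask_next(cpu, cpu_possible_mask);
}

workqueue_stat_next() below uses exactly this walk, looping until
workqueue_stat_start_cpu() returns a non-NULL entry or the mask runs out.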
Signed-off-by: KOSAKI Motohiro
Cc: Lai Jiangshan
Cc: Steven Rostedt
Cc: Frederic Weisbecker
LKML-Reference: <20090310104437.A480.A69D9226@jp.fujitsu.com>
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 46c8dc896bd3..739fdacf873b 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -91,7 +91,7 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
         struct cpu_workqueue_stats *cws;
         unsigned long flags;
 
-        WARN_ON(cpu < 0 || cpu >= num_possible_cpus());
+        WARN_ON(cpu < 0);
 
         /* Workqueues are sometimes created in atomic context */
         cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
@@ -175,12 +175,12 @@ static void *workqueue_stat_next(void *prev, int idx)
         spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
         if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
                 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-                for (++cpu ; cpu < num_possible_cpus(); cpu++) {
-                        ret = workqueue_stat_start_cpu(cpu);
-                        if (ret)
-                                return ret;
-                }
-                return NULL;
+                do {
+                        cpu = cpumask_next(cpu, cpu_possible_mask);
+                        if (cpu >= nr_cpu_ids)
+                                return NULL;
+                } while (!(ret = workqueue_stat_start_cpu(cpu)));
+                return ret;
         }
         spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 

From ef18012b248b47ec9a12c3a83ca5e99782d39c5d Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Tue, 10 Mar 2009 14:10:56 -0400
Subject: tracing: remove funky whitespace in the trace code

Impact: clean up

There existed a lot of <space><tab>'s in the tracing code. This patch
removes them.

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_workqueue.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 4664990fe9c5..e542483df623 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -19,14 +19,14 @@ struct cpu_workqueue_stats {
 /* Useful to know if we print the cpu headers */
         bool first_entry;
         int cpu;
-        pid_t pid;
+        pid_t pid;
 /* Can be inserted from interrupt or user context, need to be atomic */
-        atomic_t inserted;
+        atomic_t inserted;
 /*
  * Don't need to be atomic, works are serialized in a single workqueue thread
  * on a single CPU.
  */
-        unsigned int executed;
+        unsigned int executed;
 };
 
 /* List of workqueue threads on one cpu */

From 889a6c367283709a80dad9413488472596a1a1d2 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Fri, 13 Mar 2009 09:03:04 +0900
Subject: tracing: Don't use tracing_record_cmdline() in workqueue tracer fix

Commit c3ffc7a40b7e94b094efe1c8ab4e24370a782b65
("Don't use tracing_record_cmdline() in workqueue tracer") has a race
window: find_task_by_vpid() requires task_list_lock().
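The fix below switches to the reference-counted pid API, which needs no
global lock. In sketch form (show_comm() is an illustrative wrapper, not
part of the patch):

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

static void show_comm(struct seq_file *s, pid_t nr)
{
        struct pid *pid;
        struct task_struct *tsk;

        pid = find_get_pid(nr);                /* reference on struct pid */
        if (!pid)
                return;
        tsk = get_pid_task(pid, PIDTYPE_PID);  /* reference on the task, or NULL */
        if (tsk) {
                seq_printf(s, "%s\n", tsk->comm);
                put_task_struct(tsk);          /* drop the task reference */
        }
        put_pid(pid);                          /* drop the pid reference */
}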
LKML-Reference: <20090313090042.43CD.A69D9226@jp.fujitsu.com>
Signed-off-by: KOSAKI Motohiro
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_workqueue.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index fb5ccac8bbc0..9ab035b58cf1 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -193,12 +193,20 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
         struct cpu_workqueue_stats *cws = p;
         unsigned long flags;
         int cpu = cws->cpu;
-        struct task_struct *tsk = find_task_by_vpid(cws->pid);
-
-        seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
-                   atomic_read(&cws->inserted),
-                   cws->executed,
-                   tsk ? tsk->comm : "<...>");
+        struct pid *pid;
+        struct task_struct *tsk;
+
+        pid = find_get_pid(cws->pid);
+        if (pid) {
+                tsk = get_pid_task(pid, PIDTYPE_PID);
+                if (tsk) {
+                        seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
+                                   atomic_read(&cws->inserted), cws->executed,
+                                   tsk->comm);
+                        put_task_struct(tsk);
+                }
+                put_pid(pid);
+        }
 
         spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
         if (&cws->list == workqueue_cpu_stat(cpu)->list.next)

From 2f63b840bc8a816ac879ee773b035cf3e433fae4 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Wed, 25 Mar 2009 16:59:18 +0800
Subject: trace_workqueues: fix empty line's output

Empty lines separate each cpu's stats. After the previous fix
("trace_stat: keep original order") was applied, the empty lines were
displayed at an incorrect position.

Signed-off-by: Lai Jiangshan
Acked-by: Steven Rostedt
Acked-by: Frederic Weisbecker
LKML-Reference: <49C9F266.2060706@cn.fujitsu.com>
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 9ab035b58cf1..797201e4a137 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -196,6 +196,11 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
         struct pid *pid;
         struct task_struct *tsk;
 
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
+                seq_printf(s, "\n");
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
         pid = find_get_pid(cws->pid);
         if (pid) {
                 tsk = get_pid_task(pid, PIDTYPE_PID);
@@ -208,18 +213,13 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
                 put_pid(pid);
         }
 
-        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
-                seq_printf(s, "\n");
-        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-
         return 0;
 }
 
 static int workqueue_stat_headers(struct seq_file *s)
 {
         seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
-        seq_printf(s, "# | | | |\n\n");
+        seq_printf(s, "# | | | |\n");
         return 0;
 }
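In effect, the separator now runs before the entry is printed, as in
this sketch (print_cpu_separator() is an illustrative name; the patch
keeps the logic inline in workqueue_stat_show()):

/* Emit the blank line separating cpu groups: cws is its cpu's first
 * entry exactly when it sits right after the list head. */
static void print_cpu_separator(struct seq_file *s, int cpu,
                                struct cpu_workqueue_stats *cws)
{
        unsigned long flags;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
                seq_printf(s, "\n");
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}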