author     David S. Miller <davem@davemloft.net>  2009-12-03 12:51:21 -0800
committer  David S. Miller <davem@davemloft.net>  2009-12-03 12:51:21 -0800
commit     a6c872afb2536f47285e6643f4629dec7520041d (patch)
tree       4b54e69fc6594f9afc1277520a350db04e578e77 /kernel/workqueue.c
parent     9fe02668fe48a1d546196bc1392330ff28d9bd57 (diff)
parent     22763c5cf3690a681551162c15d34d935308c8d7 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 47cdd7e76f2b..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -685,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
+	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
@@ -692,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
 		return -ENOMEM;
 
 	get_online_cpus();
+
+	/*
+	 * When running in keventd don't schedule a work item on
+	 * itself. Can just call directly because the work queue is
+	 * already bound. This also is faster.
+	 */
+	if (current_is_keventd())
+		orig = raw_smp_processor_id();
+
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		schedule_work_on(cpu, work);
+		if (cpu != orig)
+			schedule_work_on(cpu, work);
 	}
+	if (orig >= 0)
+		func(per_cpu_ptr(works, orig));
+
 	for_each_online_cpu(cpu)
 		flush_work(per_cpu_ptr(works, cpu));
+
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
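
For context, a minimal usage sketch, not part of this commit: a hypothetical caller (report_cpu and run_everywhere are invented names for illustration) that runs a function once on every online CPU via schedule_on_each_cpu(). After this change, such a call no longer deadlocks when made from keventd context, because the current CPU's work item is invoked synchronously instead of being queued on the workqueue that is executing the caller.

/*
 * Illustrative sketch only; function names are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/kernel.h>

static void report_cpu(struct work_struct *work)
{
	/*
	 * Work items queued with schedule_work_on() run bound to that
	 * CPU, so raw_smp_processor_id() identifies it reliably here.
	 */
	printk(KERN_INFO "ran on CPU %d\n", raw_smp_processor_id());
}

static int run_everywhere(void)
{
	/* Blocks until report_cpu() has completed on every online CPU;
	 * returns 0 on success or -ENOMEM if the per-cpu allocation fails. */
	return schedule_on_each_cpu(report_cpu);
}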