Commit d7102e95 authored by Nick Piggin, committed by Linus Torvalds
Browse files

[PATCH] sched: filter affine wakeups


From: Nick Piggin <>

Track the last waker CPU, and only consider wakeup-balancing if there's a
match between current waker CPU and the previous waker CPU.  This ensures
that there is some correlation between two subsequent wakeup events before
we move the task.  Should help random-wakeup workloads on large SMP
systems, by reducing the migration attempts by a factor of nr_cpus.
Signed-off-by: Ingo Molnar <>
Signed-off-by: Nick Piggin <>
Signed-off-by: Andrew Morton <>
Signed-off-by: Linus Torvalds <>
parent 198e2f18
@@ -696,8 +696,11 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
#if defined(CONFIG_SMP)
int last_waker_cpu; /* CPU that last woke this task up */
int oncpu;
int prio, static_prio;
struct list_head run_list;
@@ -1290,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
if (p->last_waker_cpu != this_cpu)
goto out_set_cpu;
if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
goto out_set_cpu;
@@ -1360,6 +1363,8 @@ out_set_cpu:
cpu = task_cpu(p);
p->last_waker_cpu = this_cpu;
#endif /* CONFIG_SMP */
if (old_state == TASK_UNINTERRUPTIBLE) {
@@ -1441,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
memset(&p->sched_info, 0, sizeof(p->sched_info));
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
#if defined(CONFIG_SMP)
p->last_waker_cpu = cpu;
p->oncpu = 0;
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment