sched: Fix missing preemption opportunity
If an interrupt fires in cond_resched(), between the call to __schedule()
and the PREEMPT_ACTIVE count decrement, and that interrupt sets
TIF_NEED_RESCHED, the call to preempt_schedule_irq() will be ignored
due to the PREEMPT_ACTIVE count. This kind of scenario, with irq preemption
being delayed because it's interrupting a preempt-disabled area, is
usually fixed up once preemption is re-enabled, with an explicit
call to preempt_schedule().

This is what preempt_enable() does, but a raw preempt count decrement as
performed by __preempt_count_sub(PREEMPT_ACTIVE) doesn't perform that
delayed preemption check. Therefore, when such a race happens, the
rescheduling is delayed until the next scheduler or preemption entry
point. This can be a problem for scheduler-latency-sensitive workloads.
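
For illustration, here is the window in the old __cond_resched() (removed
below); the comments marking the race are annotations added here, not part
of the original source:

	static void __cond_resched(void)
	{
		__preempt_count_add(PREEMPT_ACTIVE);
		__schedule();
		/*
		 * An irq firing here that sets TIF_NEED_RESCHED is ignored
		 * by preempt_schedule_irq(), since PREEMPT_ACTIVE is set...
		 */
		__preempt_count_sub(PREEMPT_ACTIVE);
		/*
		 * ...and this raw decrement, unlike preempt_enable(), never
		 * re-checks need_resched(), so the reschedule stays pending.
		 */
	}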

Let's fix that by consolidating cond_resched() with the preempt_schedule()
internals.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Ingo Molnar <mingo@kernel.org>
Original-patch-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1421946484-9298-1-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
fweisbec authored and Ingo Molnar committed Jan 30, 2015
1 parent 80e3d87 commit a18b5d0
Showing 1 changed file with 19 additions and 21 deletions.
kernel/sched/core.c: 40 changes (19 additions & 21 deletions)
@@ -2884,6 +2884,21 @@ void __sched schedule_preempt_disabled(void)
 	preempt_disable();
 }
 
+static void preempt_schedule_common(void)
+{
+	do {
+		__preempt_count_add(PREEMPT_ACTIVE);
+		__schedule();
+		__preempt_count_sub(PREEMPT_ACTIVE);
+
+		/*
+		 * Check again in case we missed a preemption opportunity
+		 * between schedule and now.
+		 */
+		barrier();
+	} while (need_resched());
+}
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
@@ -2899,17 +2914,7 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 	if (likely(!preemptible()))
 		return;
 
-	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
-		__schedule();
-		__preempt_count_sub(PREEMPT_ACTIVE);
-
-		/*
-		 * Check again in case we missed a preemption opportunity
-		 * between schedule and now.
-		 */
-		barrier();
-	} while (need_resched());
+	preempt_schedule_common();
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
@@ -4209,17 +4214,10 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
-static void __cond_resched(void)
-{
-	__preempt_count_add(PREEMPT_ACTIVE);
-	__schedule();
-	__preempt_count_sub(PREEMPT_ACTIVE);
-}
-
 int __sched _cond_resched(void)
 {
 	if (should_resched()) {
-		__cond_resched();
+		preempt_schedule_common();
 		return 1;
 	}
 	return 0;
@@ -4244,7 +4242,7 @@ int __cond_resched_lock(spinlock_t *lock)
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
 		if (resched)
-			__cond_resched();
+			preempt_schedule_common();
 		else
 			cpu_relax();
 		ret = 1;
@@ -4260,7 +4258,7 @@ int __sched __cond_resched_softirq(void)

 	if (should_resched()) {
 		local_bh_enable();
-		__cond_resched();
+		preempt_schedule_common();
 		local_bh_disable();
 		return 1;
 	}
