rtmutex: use a trylock for waiter lock in trylock
Mike Galbraith captured the following:
| >#11 [ffff88017b243e90] _raw_spin_lock at ffffffff815d2596
| >#12 [ffff88017b243e90] rt_mutex_trylock at ffffffff815d15be
| >#13 [ffff88017b243eb0] get_next_timer_interrupt at ffffffff81063b42
| >#14 [ffff88017b243f00] tick_nohz_stop_sched_tick at ffffffff810bd1fd
| >#15 [ffff88017b243f70] tick_nohz_irq_exit at ffffffff810bd7d2
| >#16 [ffff88017b243f90] irq_exit at ffffffff8105b02d
| >#17 [ffff88017b243fb0] reschedule_interrupt at ffffffff815db3dd
| >--- <IRQ stack> ---
| >#18 [ffff88017a2a9bc8] reschedule_interrupt at ffffffff815db3dd
| >    [exception RIP: task_blocks_on_rt_mutex+51]
| >#19 [ffff88017a2a9ce0] rt_spin_lock_slowlock at ffffffff815d183c
| >#20 [ffff88017a2a9da0] lock_timer_base.isra.35 at ffffffff81061cbf
| >#21 [ffff88017a2a9dd0] schedule_timeout at ffffffff815cf1ce
| >#22 [ffff88017a2a9e50] rcu_gp_kthread at ffffffff810f9bbb
| >#23 [ffff88017a2a9ed0] kthread at ffffffff810796d5
| >#24 [ffff88017a2a9f50] ret_from_fork at ffffffff815da04c

lock_timer_base() does a try_lock() which deadlocks on the waiter lock,
not the lock itself.
This patch takes the waiter_lock with a trylock so it works from interrupt
context as well. If the fastpath fails and the waiter_lock itself is already
taken, we can assume that the lock itself is taken and report the trylock
as failed.
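
For illustration only, not part of the patch: a minimal user-space model of
the change, where a plain flag stands in for lock->wait_lock and all helper
names are made up for the sketch. The old slowtrylock spins on the
already-held wait_lock; the new one simply reports failure:

/* Illustrative sketch, not kernel code: a flag models lock->wait_lock. */
#include <stdbool.h>
#include <stdio.h>

static bool wait_lock_held;

static bool wait_lock_trylock(void)
{
	if (wait_lock_held)
		return false;
	wait_lock_held = true;
	return true;
}

/* Old behaviour: the trylock slowpath blocks on wait_lock. */
static int slowtrylock_old(void)
{
	while (!wait_lock_trylock())
		;	/* never returns if the interrupted task holds it */
	/* ... try to grab the mutex ... */
	wait_lock_held = false;
	return 1;
}

/* New behaviour: bail out if wait_lock is contended. */
static int slowtrylock_new(void)
{
	if (!wait_lock_trylock())
		return 0;	/* lock is busy anyway, trylock fails */
	/* ... try to grab the mutex ... */
	wait_lock_held = false;
	return 1;
}

int main(void)
{
	wait_lock_held = true;	/* the interrupted task holds wait_lock */
	printf("trylock from 'irq': %d\n", slowtrylock_new());	/* prints 0 */
	/* slowtrylock_old() would spin forever here. */
	return 0;
}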
This patch also adds "rt_spin_unlock_after_trylock_in_irq" to keep lockdep
happy. If we managed to take the wait_lock in the first place, we should
also be able to take it in the unlock path.
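
For illustration only: the intended pairing on the timer base lock,
paraphrased from get_next_timer_interrupt() under CONFIG_PREEMPT_RT_FULL.
The trylock side and the "now + 1" early return are assumptions about the
surrounding -rt code, not part of this patch; only the unlock call below is
what this commit changes:

#ifdef CONFIG_PREEMPT_RT_FULL
	/* never block on base->lock from the irq-exit path */
	if (!spin_do_trylock(&base->lock))
		return now + 1;
#else
	spin_lock(&base->lock);
#endif
	/* ... compute the next timer expiry from the base ... */
#ifdef CONFIG_PREEMPT_RT_FULL
	rt_spin_unlock_after_trylock_in_irq(&base->lock);
#else
	spin_unlock(&base->lock);
#endif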

Cc: stable-rt@vger.kernel.org
Reported-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Sebastian Andrzej Siewior authored and linux4kix committed Jan 25, 2016
1 parent f78775f commit 59b8e4a
Showing 3 changed files with 29 additions and 5 deletions.
1 change: 1 addition & 0 deletions include/linux/spinlock_rt.h
@@ -22,6 +22,7 @@ extern void __lockfunc rt_spin_lock(spinlock_t *lock);
 extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
 extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
 extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
 extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
 extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
 extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
31 changes: 27 additions & 4 deletions kernel/locking/rtmutex.c
@@ -1047,10 +1047,8 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 /*
  * Slow path to release a rt_mutex spin_lock style
  */
-static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
 {
-	raw_spin_lock(&lock->wait_lock);
-
 	debug_rt_mutex_unlock(lock);
 
 	rt_mutex_deadlock_account_unlock(current);
@@ -1069,6 +1067,23 @@ static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 	rt_mutex_adjust_prio(current);
 }
 
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+	raw_spin_lock(&lock->wait_lock);
+	__rt_spin_lock_slowunlock(lock);
+}
+
+static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
+{
+	int ret;
+
+	do {
+		ret = raw_spin_trylock(&lock->wait_lock);
+	} while (!ret);
+
+	__rt_spin_lock_slowunlock(lock);
+}
+
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
 	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1099,6 +1114,13 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(rt_spin_unlock);
 
+void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
+{
+	/* NOTE: we always pass in '1' for nested, for simplicity */
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
+}
+
 void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
 {
 	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
@@ -1443,7 +1465,8 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
 	int ret = 0;
 
-	raw_spin_lock(&lock->wait_lock);
+	if (!raw_spin_trylock(&lock->wait_lock))
+		return ret;
 
 	if (likely(rt_mutex_owner(lock) != current)) {
 
2 changes: 1 addition & 1 deletion kernel/timer.c
@@ -1400,7 +1400,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
 		expires = base->next_timer;
 	}
 #ifdef CONFIG_PREEMPT_RT_FULL
-	rt_spin_unlock(&base->lock);
+	rt_spin_unlock_after_trylock_in_irq(&base->lock);
 #else
 	spin_unlock(&base->lock);
 #endif
