
Subclass tq_lock to eliminate a lockdep warning
When taskq_dispatch() calls taskq_thread_spawn() to create a new thread
for a taskq, Linux lockdep warns of possible recursive locking.  This is
a false positive.

One such call chain is as follows, when a taskq needs more threads:
	taskq_dispatch->taskq_thread_spawn->taskq_dispatch

The initial taskq_dispatch() holds tq_lock on the taskq that needed more
worker threads.  The later call into taskq_dispatch() takes
dynamic_taskq->tq_lock.  Without subclassing, lockdep believes these
could potentially be the same lock and complains.  A similar case occurs
when taskq_dispatch() then calls task_alloc().

This patch uses spin_lock_irqsave_nested() when taking tq_lock, with one
of two new lock subclasses:

subclass              taskq
TQ_LOCK_DYNAMIC       dynamic_taskq
TQ_LOCK_GENERAL       any other
ofaaland committed Dec 12, 2015
1 parent 20350d4 commit aca2626
Showing 2 changed files with 38 additions and 21 deletions.
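
The diff below swaps spin_lock_irqsave() for spin_lock_irqsave_nested() at every tq_lock site and threads a per-taskq lock class through the code. Here is a minimal sketch of the idiom, assuming a kernel-module context; struct queue, queue_init(), and queue_dispatch() are hypothetical names for illustration, not part of this patch:

#include <linux/spinlock.h>

struct queue {
	spinlock_t lock;
	unsigned long lock_flags;
	int lock_class;			/* lockdep subclass for this queue */
};

static void queue_init(struct queue *q, int lock_class)
{
	/*
	 * A single spin_lock_init() call site puts every queue's lock
	 * into the same lockdep class, just as all taskq tq_locks share
	 * one class.
	 */
	spin_lock_init(&q->lock);
	q->lock_class = lock_class;
}

static void queue_dispatch(struct queue *q, struct queue *dynq)
{
	/* The subclass marks the nesting level of this acquisition. */
	spin_lock_irqsave_nested(&q->lock, q->lock_flags, q->lock_class);

	if (dynq) {
		/*
		 * Taking a second same-class lock while q->lock is held
		 * looks like recursion to lockdep unless dynq's subclass
		 * differs (e.g. 0 for q, 1 for dynq).
		 */
		spin_lock_irqsave_nested(&dynq->lock, dynq->lock_flags,
		    dynq->lock_class);
		spin_unlock_irqrestore(&dynq->lock, dynq->lock_flags);
	}

	spin_unlock_irqrestore(&q->lock, q->lock_flags);
}

Storing the irqsave flags inside the structure mirrors the SPL pattern visible in the diff; the subclass value travels with the queue, so every lock site can pass it without knowing which kind of queue it holds.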
9 changes: 9 additions & 0 deletions include/sys/taskq.h
@@ -55,6 +55,14 @@
#define TQ_NEW 0x04000000
#define TQ_FRONT 0x08000000

+/* spin_lock(lock) and spin_lock_nested(lock,0) are equivalent,
+ * so TQ_LOCK_DYNAMIC must not evaluate to 0
+ */
+typedef enum tq_lock_role {
+	TQ_LOCK_GENERAL = 0,
+	TQ_LOCK_DYNAMIC = 1,
+} tq_lock_role_t;

typedef unsigned long taskqid_t;
typedef void (task_func_t)(void *);

@@ -81,6 +89,7 @@ typedef struct taskq {
struct list_head tq_delay_list; /* delayed task_t's */
wait_queue_head_t tq_work_waitq; /* new work waitq */
wait_queue_head_t tq_wait_waitq; /* wait waitq */
+tq_lock_role_t tq_lock_class; /* class used when taking tq_lock */
} taskq_t;

typedef struct taskq_ent {
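
The header comment above is the crux of the patch: lockdep registers a plain spin_lock() under subclass 0, so if TQ_LOCK_DYNAMIC were also 0, the nested acquisition would be indistinguishable from true recursion. A sketch of both cases, reusing the hypothetical struct queue from the earlier example:

/* Same lockdep class, same subclass: lockdep reports possible recursion. */
static void nested_warns(struct queue *outer, struct queue *inner)
{
	spin_lock_irqsave_nested(&outer->lock, outer->lock_flags, 0);
	spin_lock_irqsave_nested(&inner->lock, inner->lock_flags, 0);
	spin_unlock_irqrestore(&inner->lock, inner->lock_flags);
	spin_unlock_irqrestore(&outer->lock, outer->lock_flags);
}

/* Same class, distinct nonzero inner subclass: no warning. */
static void nested_clean(struct queue *outer, struct queue *inner)
{
	spin_lock_irqsave_nested(&outer->lock, outer->lock_flags, 0);
	spin_lock_irqsave_nested(&inner->lock, inner->lock_flags, 1);
	spin_unlock_irqrestore(&inner->lock, inner->lock_flags);
	spin_unlock_irqrestore(&outer->lock, outer->lock_flags);
}

This is also why TQ_LOCK_GENERAL can safely stay 0: subclass 0 is exactly what every existing plain spin_lock_irqsave() caller was already using.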
50 changes: 29 additions & 21 deletions module/spl/spl-taskq.c
@@ -113,7 +113,7 @@ task_alloc(taskq_t *tq, uint_t flags)
*/
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
schedule_timeout(HZ / 100);
-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
if (count < 100) {
count++;
goto retry;
@@ -122,7 +122,7 @@ task_alloc(taskq_t *tq, uint_t flags)

spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);

if (t) {
taskq_init_ent(t);
@@ -188,7 +188,7 @@ task_expire(unsigned long data)
taskq_t *tq = t->tqent_taskq;
struct list_head *l;

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);

if (t->tqent_flags & TQENT_FLAG_CANCEL) {
ASSERT(list_empty(&t->tqent_list));
@@ -379,7 +379,7 @@ taskq_wait_id_check(taskq_t *tq, taskqid_t id)
int active = 0;
int rc;

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
rc = (taskq_find(tq, id, &active) == NULL);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

@@ -402,7 +402,7 @@ taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
int rc;

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

@@ -429,7 +429,7 @@ taskq_wait_check(taskq_t *tq)
{
int rc;

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
rc = (tq->tq_lowest_id == tq->tq_next_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

@@ -474,7 +474,7 @@ taskq_member(taskq_t *tq, void *t)
{
int found;

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
found = taskq_member_impl(tq, t);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

@@ -497,7 +497,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)

ASSERT(tq);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
t = taskq_find(tq, id, &active);
if (t && !active) {
list_del_init(&t->tqent_list);
@@ -519,7 +519,7 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
if (timer_pending(&t->tqent_timer)) {
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
del_timer_sync(&t->tqent_timer);
-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
}

if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
@@ -549,7 +549,7 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
ASSERT(tq);
ASSERT(func);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);

/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
@@ -605,7 +605,7 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
ASSERT(tq);
ASSERT(func);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);

/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
@@ -648,7 +648,7 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
ASSERT(tq);
ASSERT(func);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);

/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE)) {
@@ -740,13 +740,13 @@ taskq_thread_spawn_task(void *arg)

(void) taskq_thread_create(tq);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
tq->tq_nspawn--;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}

/*
- * Spawn addition threads for dynamic taskqs (TASKQ_DYNMAIC) the current
+ * Spawn addition threads for dynamic taskqs (TASKQ_DYNAMIC) the current
* number of threads is insufficient to handle the pending tasks. These
* new threads must be created by the dedicated dynamic_taskq to avoid
* deadlocks between thread creation and memory reclaim. The system_taskq
@@ -810,6 +810,7 @@ taskq_thread(void *args)
int seq_tasks = 0;

ASSERT(tqt);
+ASSERT(tqt->tqt_tq);
tq = tqt->tqt_tq;
current->flags |= PF_NOFREEZE;

@@ -821,7 +822,7 @@
sigprocmask(SIG_BLOCK, &blocked, NULL);
flush_signals(current);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);

/* Immediately exit if more threads than allowed were created. */
if (tq->tq_nthreads >= tq->tq_maxthreads)
@@ -848,7 +849,7 @@
schedule();
seq_tasks = 0;

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
remove_wait_queue(&tq->tq_work_waitq, &wait);
} else {
__set_current_state(TASK_RUNNING);
@@ -877,7 +878,7 @@
/* Perform the requested task */
t->tqent_func(t->tqent_arg);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
tq->tq_nactive--;
list_del_init(&tqt->tqt_active_list);
tqt->tqt_task = NULL;
@@ -999,9 +1000,10 @@ taskq_create(const char *name, int nthreads, pri_t pri,
INIT_LIST_HEAD(&tq->tq_delay_list);
init_waitqueue_head(&tq->tq_work_waitq);
init_waitqueue_head(&tq->tq_wait_waitq);
+tq->tq_lock_class = TQ_LOCK_GENERAL;

if (flags & TASKQ_PREPOPULATE) {
-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);

for (i = 0; i < minalloc; i++)
task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));
@@ -1040,7 +1042,7 @@ taskq_destroy(taskq_t *tq)
taskq_ent_t *t;

ASSERT(tq);
-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
tq->tq_flags &= ~TASKQ_ACTIVE;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

@@ -1053,7 +1055,7 @@ taskq_destroy(taskq_t *tq)

taskq_wait(tq);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);

/*
* Signal each thread to exit and block until it does. Each thread
@@ -1069,7 +1071,7 @@ taskq_destroy(taskq_t *tq)

kthread_stop(thread);

-spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags, tq->tq_lock_class);
}

while (!list_empty(&tq->tq_free_list)) {
@@ -1113,6 +1115,12 @@ spl_taskq_init(void)
return (1);
}

+/* This is used to annotate tq_lock, so
+ * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
+ * does not trigger a lockdep warning re: possible recursive locking
+ */
+dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

return (0);
}

