diff --git a/include/sys/zil_impl.h b/include/sys/zil_impl.h
index bb85bf6d1eb1..2283f0fafa47 100644
--- a/include/sys/zil_impl.h
+++ b/include/sys/zil_impl.h
@@ -183,6 +183,7 @@ struct zilog {
 	uint64_t	zl_destroy_txg;	/* txg of last zil_destroy() */
 	uint64_t	zl_replayed_seq[TXG_SIZE]; /* last replayed rec seq */
 	uint64_t	zl_replaying_seq; /* current replay seq number */
+	krwlock_t	zl_commit_lock;	/* protects suspend count */
 	uint32_t	zl_suspend;	/* log suspend count */
 	kcondvar_t	zl_cv_suspend;	/* log suspend completion */
 	uint8_t		zl_suspending;	/* log is currently suspending */
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index 2e017992fd9a..1d402a14d6ea 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -3195,6 +3195,18 @@ zil_commit(zilog_t *zilog, uint64_t foid)
 		return;
 	}
 
+	/*
+	 * The ->zl_commit_lock rwlock ensures that all in-flight
+	 * zil_commit() operations finish before suspension begins and that
+	 * no more begin. Without it, it is possible for the scheduler to
+	 * preempt us right after the zilog->zl_suspend suspend check, run
+	 * another thread that runs zil_suspend() and after the other thread
+	 * has finished its call to zil_commit_impl(), resume this thread
+	 * while zil is suspended. This can trigger an assertion failure in
+	 * VERIFY(list_is_empty(&lwb->lwb_itxs)).
+	 */
+	rw_enter(&zilog->zl_commit_lock, RW_READER);
+
 	/*
 	 * If the ZIL is suspended, we don't want to dirty it by calling
 	 * zil_commit_itx_assign() below, nor can we write out
@@ -3203,11 +3215,13 @@ zil_commit(zilog_t *zilog, uint64_t foid)
 	 * semantics, and avoid calling those functions altogether.
 	 */
 	if (zilog->zl_suspend > 0) {
+		rw_exit(&zilog->zl_commit_lock);
 		txg_wait_synced(zilog->zl_dmu_pool, 0);
 		return;
 	}
 
 	zil_commit_impl(zilog, foid);
+	rw_exit(&zilog->zl_commit_lock);
 }
 
 void
@@ -3472,6 +3486,8 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys)
 	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
 	cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL);
 
+	rw_init(&zilog->zl_commit_lock, NULL, RW_DEFAULT, NULL);
+
 	return (zilog);
 }
 
@@ -3511,6 +3527,8 @@ zil_free(zilog_t *zilog)
 	cv_destroy(&zilog->zl_cv_suspend);
 	cv_destroy(&zilog->zl_lwb_io_cv);
 
+	rw_destroy(&zilog->zl_commit_lock);
+
 	kmem_free(zilog, sizeof (zilog_t));
 }
 
@@ -3638,11 +3656,14 @@ zil_suspend(const char *osname, void **cookiep)
 		return (error);
 
 	zilog = dmu_objset_zil(os);
+	rw_enter(&zilog->zl_commit_lock, RW_WRITER);
+
 	mutex_enter(&zilog->zl_lock);
 	zh = zilog->zl_header;
 
 	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {	/* unplayed log */
 		mutex_exit(&zilog->zl_lock);
+		rw_exit(&zilog->zl_commit_lock);
 		dmu_objset_rele(os, suspend_tag);
 		return (SET_ERROR(EBUSY));
 	}
@@ -3656,6 +3677,7 @@ zil_suspend(const char *osname, void **cookiep)
 	if (cookiep == NULL && !zilog->zl_suspending &&
 	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
 		mutex_exit(&zilog->zl_lock);
+		rw_exit(&zilog->zl_commit_lock);
 		dmu_objset_rele(os, suspend_tag);
 		return (0);
 	}
@@ -3664,6 +3686,7 @@ zil_suspend(const char *osname, void **cookiep)
 	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
 
 	zilog->zl_suspend++;
+	rw_exit(&zilog->zl_commit_lock);
 
 	if (zilog->zl_suspend > 1) {
 		/*