diff --git a/kernel/kthread.c b/kernel/kthread.c
index 0a009d6a3ac3..94f315773d87 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1139,6 +1139,19 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 		goto out;
 
 	ret = __kthread_cancel_work(work, true, &flags);
+
+	/*
+	 * Canceling could run in parallel from kthread_cancel_delayed_work_sync
+	 * and change work's canceling count while the spinlock is released and
+	 * reacquired in __kthread_cancel_work, so re-check the count here.
+	 * Otherwise we might incorrectly queue the dwork and make the
+	 * kthread_cancel_delayed_work_sync caller wait endlessly for its flush.
+	 */
+	if (work->canceling) {
+		ret = false;
+		goto out;
+	}
+
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out: