mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git
commit 3feff3acfa
kernel/workqueue.c
@@ -3680,23 +3680,27 @@ void workqueue_softirq_dead(unsigned int cpu)
  * check_flush_dependency - check for flush dependency sanity
  * @target_wq: workqueue being flushed
  * @target_work: work item being flushed (NULL for workqueue flushes)
+ * @from_cancel: are we called from the work cancel path
  *
  * %current is trying to flush the whole @target_wq or @target_work on it.
- * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
- * reclaiming memory or running on a workqueue which doesn't have
- * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
- * a deadlock.
+ * If this is not the cancel path (which implies work being flushed is either
+ * already running, or will not be at all), check if @target_wq doesn't have
+ * %WQ_MEM_RECLAIM and verify that %current is not reclaiming memory or running
+ * on a workqueue which doesn't have %WQ_MEM_RECLAIM as that can break forward-
+ * progress guarantee leading to a deadlock.
  */
 static void check_flush_dependency(struct workqueue_struct *target_wq,
-				   struct work_struct *target_work)
+				   struct work_struct *target_work,
+				   bool from_cancel)
 {
-	work_func_t target_func = target_work ? target_work->func : NULL;
+	work_func_t target_func;
 	struct worker *worker;
 
-	if (target_wq->flags & WQ_MEM_RECLAIM)
+	if (from_cancel || target_wq->flags & WQ_MEM_RECLAIM)
 		return;
 
 	worker = current_wq_worker();
+	target_func = target_work ? target_work->func : NULL;
 
 	WARN_ONCE(current->flags & PF_MEMALLOC,
 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
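As a minimal, hypothetical module sketch of the dependency this kernel-doc describes (identifiers such as ex_reclaim_wq are illustrative and not part of this patch): a work item running on a WQ_MEM_RECLAIM workqueue flushes a workqueue without WQ_MEM_RECLAIM, which is exactly the case check_flush_dependency() warns about, with or without the new from_cancel argument.

/*
 * Hypothetical sketch, not from this patch: a rescuer-backed (WQ_MEM_RECLAIM)
 * worker must keep making progress under memory pressure, so it must not wait
 * on a workqueue that has no such guarantee.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ex_reclaim_wq;	/* has WQ_MEM_RECLAIM */
static struct workqueue_struct *ex_plain_wq;	/* no forward-progress guarantee */

static void ex_reclaim_fn(struct work_struct *work)
{
	/*
	 * Flushing a !WQ_MEM_RECLAIM workqueue from a WQ_MEM_RECLAIM worker:
	 * check_flush_dependency() emits its WARN_ONCE() for this.
	 */
	flush_workqueue(ex_plain_wq);
}
static DECLARE_WORK(ex_reclaim_work, ex_reclaim_fn);

static int __init ex_init(void)
{
	ex_reclaim_wq = alloc_workqueue("ex_reclaim", WQ_MEM_RECLAIM, 0);
	ex_plain_wq = alloc_workqueue("ex_plain", 0, 0);
	if (!ex_reclaim_wq || !ex_plain_wq) {
		if (ex_reclaim_wq)
			destroy_workqueue(ex_reclaim_wq);
		if (ex_plain_wq)
			destroy_workqueue(ex_plain_wq);
		return -ENOMEM;
	}

	queue_work(ex_reclaim_wq, &ex_reclaim_work);
	return 0;
}

static void __exit ex_exit(void)
{
	/* destroy_workqueue() drains any pending/running work first. */
	destroy_workqueue(ex_reclaim_wq);
	destroy_workqueue(ex_plain_wq);
}

module_init(ex_init);
module_exit(ex_exit);
MODULE_LICENSE("GPL");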
@@ -3980,7 +3984,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
 	}
 
-	check_flush_dependency(wq, NULL);
+	check_flush_dependency(wq, NULL, false);
 
 	mutex_unlock(&wq->mutex);
 
@@ -4155,7 +4159,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	}
 
 	wq = pwq->wq;
-	check_flush_dependency(wq, work);
+	check_flush_dependency(wq, work, from_cancel);
 
 	insert_wq_barrier(pwq, barr, work, worker);
 	raw_spin_unlock_irq(&pool->lock);
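The two call sites differ in what they pass: a whole-workqueue flush can never come from the cancel path, so __flush_workqueue() hard-codes false, while start_flush_work() forwards its from_cancel argument. A hedged sketch of the case this relaxes (identifiers are illustrative, not from this patch): cancelling work that lives on a plain workqueue from a WQ_MEM_RECLAIM worker no longer warns, because the cancel only waits for an instance that is already running, or for nothing at all, and so cannot add a new forward-progress dependency; a plain flush_work() from the same context would still be checked.

/*
 * Hypothetical sketch, not from this patch: ex_plain_work is queued on a
 * workqueue without WQ_MEM_RECLAIM; the teardown function below runs on a
 * WQ_MEM_RECLAIM workqueue.
 */
#include <linux/workqueue.h>

static void ex_plain_fn(struct work_struct *work)
{
	/* arbitrary payload */
}
static DECLARE_WORK(ex_plain_work, ex_plain_fn);

static void ex_reclaim_teardown_fn(struct work_struct *work)
{
	/*
	 * Cancel path: start_flush_work() now receives from_cancel=true, so
	 * check_flush_dependency() returns early and no warning is emitted.
	 */
	cancel_work_sync(&ex_plain_work);

	/*
	 * Flush path: still checked.  If ex_plain_work's workqueue lacks
	 * WQ_MEM_RECLAIM, the following would trigger the WARN_ONCE() above:
	 *
	 *	flush_work(&ex_plain_work);
	 */
}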
rust/kernel/workqueue.rs
@@ -519,7 +519,15 @@ unsafe fn raw_get_work(ptr: *mut Self) -> *mut $crate::workqueue::Work<$work_typ
     impl{T} HasWork<Self> for ClosureWork<T> { self.work }
 }
 
-// SAFETY: TODO.
+// SAFETY: The `__enqueue` implementation in RawWorkItem uses a `work_struct` initialized with the
+// `run` method of this trait as the function pointer because:
+//   - `__enqueue` gets the `work_struct` from the `Work` field, using `T::raw_get_work`.
+//   - The only safe way to create a `Work` object is through `Work::new`.
+//   - `Work::new` makes sure that `T::Pointer::run` is passed to `init_work_with_key`.
+//   - Finally `Work` and `RawWorkItem` guarantee that the correct `Work` field
+//     will be used because of the ID const generic bound. This makes sure that `T::raw_get_work`
+//     uses the correct offset for the `Work` field, and `Work::new` picks the correct
+//     implementation of `WorkItemPointer` for `Arc<T>`.
 unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
 where
     T: WorkItem<ID, Pointer = Self>,
@@ -537,7 +545,13 @@ unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
     }
 }
 
-// SAFETY: TODO.
+// SAFETY: The `work_struct` raw pointer is guaranteed to be valid for the duration of the call to
+// the closure because we get it from an `Arc`, which means that the ref count will be at least 1,
+// and we don't drop the `Arc` ourselves. If `queue_work_on` returns true, it is further guaranteed
+// to be valid until a call to the function pointer in `work_struct` because we leak the memory it
+// points to, and only reclaim it if the closure returns false, or in `WorkItemPointer::run`, which
+// is what the function pointer in the `work_struct` must be pointing to, according to the safety
+// requirements of `WorkItemPointer`.
 unsafe impl<T, const ID: u64> RawWorkItem<ID> for Arc<T>
 where
     T: WorkItem<ID, Pointer = Self>,