RDMA/mlx4: Avoid flush_scheduled_work() usage
Flushing system-wide workqueues is dangerous and will be forbidden.
Replace system_wq with a local cm_wq.

Link: https://lore.kernel.org/r/22f7183b-cc16-5a34-e879-7605f5efc6e6@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 9cf62d91e4
parent 549f39a58a
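The change is mechanical but worth spelling out: work items that were previously queued on the shared system_wq (via schedule_delayed_work()) and waited on with flush_scheduled_work() now go to a driver-private workqueue, so flushing waits only for this driver's own work. Below is a minimal sketch of the same pattern in a self-contained module; the example_* names are hypothetical illustrations, not part of this commit:

	/*
	 * Sketch of the private-workqueue pattern, assuming a single
	 * delayed work item. Names (example_*) are placeholders.
	 */
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static void example_fn(struct work_struct *work)
	{
		/* work handler body */
	}
	static DECLARE_DELAYED_WORK(example_work, example_fn);

	static int __init example_init(void)
	{
		example_wq = alloc_workqueue("example_wq", 0, 0);
		if (!example_wq)
			return -ENOMEM;
		/* Queue on the private workqueue instead of system_wq. */
		queue_delayed_work(example_wq, &example_work, HZ);
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* Waits only for this item, not for system-wide work. */
		cancel_delayed_work_sync(&example_work);
		destroy_workqueue(example_wq);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");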
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
@@ -80,6 +80,7 @@ struct cm_req_msg {
 	union ib_gid primary_path_sgid;
 };
 
+static struct workqueue_struct *cm_wq;
 
 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
 {
@@ -288,10 +289,10 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
 	/*make sure that there is no schedule inside the scheduled work.*/
 	if (!sriov->is_going_down && !id->scheduled_delete) {
 		id->scheduled_delete = 1;
-		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+		queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 	} else if (id->scheduled_delete) {
 		/* Adjust timeout if already scheduled */
-		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+		mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 	}
 	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
 	spin_unlock(&sriov->id_map_lock);
@@ -370,7 +371,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
 			ret = xa_err(item);
 		else
 			/* If a retry, adjust delayed work */
-			mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+			mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 		goto err_or_exists;
 	}
 	xa_unlock(&sriov->xa_rej_tmout);
@@ -393,7 +394,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
 		return xa_err(old);
 	}
 
-	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+	queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 
 	return 0;
 
@@ -500,7 +501,7 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
 	xa_lock(&sriov->xa_rej_tmout);
 	xa_for_each(&sriov->xa_rej_tmout, id, item) {
 		if (slave < 0 || slave == item->slave) {
-			mod_delayed_work(system_wq, &item->timeout, 0);
+			mod_delayed_work(cm_wq, &item->timeout, 0);
 			flush_needed = true;
 			++cnt;
 		}
@@ -508,7 +509,7 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
 	xa_unlock(&sriov->xa_rej_tmout);
 
 	if (flush_needed) {
-		flush_scheduled_work();
+		flush_workqueue(cm_wq);
 		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
 			 cnt, slave);
 	}
@@ -540,7 +541,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 	spin_unlock(&sriov->id_map_lock);
 
 	if (need_flush)
-		flush_scheduled_work(); /* make sure all timers were flushed */
+		flush_workqueue(cm_wq); /* make sure all timers were flushed */
 
 	/* now, remove all leftover entries from databases*/
 	spin_lock(&sriov->id_map_lock);
@@ -587,3 +588,17 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
 
 	rej_tmout_xa_cleanup(sriov, slave);
 }
+
+int mlx4_ib_cm_init(void)
+{
+	cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
+	if (!cm_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_ib_cm_destroy(void)
+{
+	destroy_workqueue(cm_wq);
+}
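Note the flush idiom in rej_tmout_xa_cleanup() above: pending timeouts are first expedited with mod_delayed_work(..., 0) and then waited on with flush_workqueue(). A hedged sketch of that two-step, where wq and dwork are placeholder names standing in for cm_wq and item->timeout:

	#include <linux/workqueue.h>

	/* Force a pending delayed work item to run now, then wait for it. */
	static void expedite_and_flush(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
	{
		/* Re-arm with zero delay: the handler runs as soon as a
		 * worker picks it up, instead of at its original timeout. */
		mod_delayed_work(wq, dwork, 0);
		/* Wait only for work queued on this private queue; unlike
		 * the removed flush_scheduled_work(), unrelated system-wide
		 * work items cannot stall or deadlock this flush. */
		flush_workqueue(wq);
	}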
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
@@ -3307,10 +3307,14 @@ static int __init mlx4_ib_init(void)
 	if (!wq)
 		return -ENOMEM;
 
-	err = mlx4_ib_mcg_init();
+	err = mlx4_ib_cm_init();
 	if (err)
 		goto clean_wq;
 
+	err = mlx4_ib_mcg_init();
+	if (err)
+		goto clean_cm;
+
 	err = mlx4_register_interface(&mlx4_ib_interface);
 	if (err)
 		goto clean_mcg;
@@ -3320,6 +3324,9 @@ static int __init mlx4_ib_init(void)
 clean_mcg:
 	mlx4_ib_mcg_destroy();
 
+clean_cm:
+	mlx4_ib_cm_destroy();
+
 clean_wq:
 	destroy_workqueue(wq);
 	return err;
@@ -3329,6 +3336,7 @@ static void __exit mlx4_ib_cleanup(void)
 {
 	mlx4_unregister_interface(&mlx4_ib_interface);
 	mlx4_ib_mcg_destroy();
+	mlx4_ib_cm_destroy();
 	destroy_workqueue(wq);
 }
 
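The main.c hunks above follow the usual kernel init/unwind discipline: mlx4_ib_cm_init() is now called before mlx4_ib_mcg_init(), so the error labels release resources in reverse order of acquisition (clean_mcg, then the new clean_cm, then clean_wq), and mlx4_ib_cleanup() mirrors the same ordering. A generic sketch of the idiom, with hypothetical a_*/b_* helpers:

	static int __init demo_init(void)
	{
		int err;

		err = a_init();		/* acquired first ... */
		if (err)
			return err;

		err = b_init();
		if (err)
			goto clean_a;	/* ... so released last on error */

		return 0;

	clean_a:
		a_destroy();
		return err;
	}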
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -937,4 +937,7 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 				       int *num_of_mtts);
 
+int mlx4_ib_cm_init(void);
+void mlx4_ib_cm_destroy(void);
+
 #endif /* MLX4_IB_H */