mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-16 18:08:20 +00:00
Merge branch 'virtio_net-fix-lock-warning-and-unrecoverable-state'
Heng Qi says: ==================== virtio_net: fix lock warning and unrecoverable state Patch 1 describes and fixes an issue where dim cannot return to a normal state in certain scenarios. Patch 2 attempts to resolve lockdep's complaints about holding many nested locks. ==================== Link: https://lore.kernel.org/r/20240528134116.117426-1-hengqi@linux.alibaba.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
45c0a209dc
@ -4259,7 +4259,6 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
|
||||
struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
|
||||
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
|
||||
struct scatterlist sgs_rx;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
|
||||
@ -4269,27 +4268,27 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
|
||||
ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
|
||||
return -EINVAL;
|
||||
|
||||
/* Acquire all queues dim_locks */
|
||||
for (i = 0; i < vi->max_queue_pairs; i++)
|
||||
mutex_lock(&vi->rq[i].dim_lock);
|
||||
|
||||
if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
|
||||
vi->rx_dim_enabled = true;
|
||||
for (i = 0; i < vi->max_queue_pairs; i++)
|
||||
for (i = 0; i < vi->max_queue_pairs; i++) {
|
||||
mutex_lock(&vi->rq[i].dim_lock);
|
||||
vi->rq[i].dim_enabled = true;
|
||||
goto unlock;
|
||||
mutex_unlock(&vi->rq[i].dim_lock);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
|
||||
if (!coal_rx) {
|
||||
ret = -ENOMEM;
|
||||
goto unlock;
|
||||
}
|
||||
if (!coal_rx)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
|
||||
vi->rx_dim_enabled = false;
|
||||
for (i = 0; i < vi->max_queue_pairs; i++)
|
||||
for (i = 0; i < vi->max_queue_pairs; i++) {
|
||||
mutex_lock(&vi->rq[i].dim_lock);
|
||||
vi->rq[i].dim_enabled = false;
|
||||
mutex_unlock(&vi->rq[i].dim_lock);
|
||||
}
|
||||
}
|
||||
|
||||
/* Since the per-queue coalescing params can be set,
|
||||
@ -4302,22 +4301,19 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
|
||||
|
||||
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
|
||||
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
|
||||
&sgs_rx)) {
|
||||
ret = -EINVAL;
|
||||
goto unlock;
|
||||
}
|
||||
&sgs_rx))
|
||||
return -EINVAL;
|
||||
|
||||
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
|
||||
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
|
||||
for (i = 0; i < vi->max_queue_pairs; i++) {
|
||||
mutex_lock(&vi->rq[i].dim_lock);
|
||||
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
|
||||
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
|
||||
}
|
||||
unlock:
|
||||
for (i = vi->max_queue_pairs - 1; i >= 0; i--)
|
||||
mutex_unlock(&vi->rq[i].dim_lock);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
|
||||
@ -4419,9 +4415,9 @@ static void virtnet_rx_dim_work(struct work_struct *work)
|
||||
if (err)
|
||||
pr_debug("%s: Failed to send dim parameters on rxq%d\n",
|
||||
dev->name, qnum);
|
||||
dim->state = DIM_START_MEASURE;
|
||||
}
|
||||
out:
|
||||
dim->state = DIM_START_MEASURE;
|
||||
mutex_unlock(&rq->dim_lock);
|
||||
}
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user