xen/pvcalls: don't call bind_evtchn_to_irqhandler() under lock

bind_evtchn_to_irqhandler() shouldn't be called while holding a spinlock,
as it can sleep.
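
To make this concrete, here is a minimal, hypothetical sketch of the
pattern being removed (demo_lock, demo_broken_pattern and the "demo"
name are invented for illustration; this is not code from the driver):

  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <xen/events.h>

  static DEFINE_SPINLOCK(demo_lock);	/* hypothetical example lock */

  static void demo_broken_pattern(evtchn_port_t evtchn, irq_handler_t handler,
                                  void *dev_id)
  {
          spin_lock(&demo_lock);
          /*
           * BUG: bind_evtchn_to_irqhandler() may sleep (it ends up in
           * request_irq()), but preemption is disabled while demo_lock
           * is held.  With CONFIG_DEBUG_ATOMIC_SLEEP the kernel reports
           * this as "sleeping function called from invalid context".
           */
          bind_evtchn_to_irqhandler(evtchn, handler, 0, "demo", dev_id);
          spin_unlock(&demo_lock);
  }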

This requires moving the calls of create_active() out of the locked
regions. That is not a problem: the worst that could happen is a
spurious call of the interrupt handler, causing a spurious wake_up().
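
Why such a spurious interrupt is harmless follows from the usual
wait_event()/wake_up() pattern; the following is a generic, hypothetical
sketch (the demo_* names are invented for illustration and this is not
the driver's actual handler):

  #include <linux/interrupt.h>
  #include <linux/wait.h>

  static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* hypothetical example */
  static bool demo_reply_ready;

  /* May fire as soon as the event channel is bound, even before a
   * request is actually in flight; all it does is wake the queue. */
  static irqreturn_t demo_evt_handler(int irq, void *dev_id)
  {
          wake_up(&demo_wq);
          return IRQ_HANDLED;
  }

  static void demo_wait_for_reply(void)
  {
          /* A premature wake_up() just re-evaluates the condition and
           * the caller goes back to sleep, so it is harmless. */
          wait_event(demo_wq, demo_reply_ready);
  }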

Reported-by: Dan Carpenter <error27@gmail.com>
Link: https://lore.kernel.org/lkml/Y+JUIl64UDmdkboh@kadam/
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Link: https://lore.kernel.org/r/20230403092711.15285-1-jgross@suse.com
Signed-off-by: Juergen Gross <jgross@suse.com>

@@ -227,22 +227,30 @@ static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
 static void free_active_ring(struct sock_mapping *map);
 
-static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
-				   struct sock_mapping *map)
+static void pvcalls_front_destroy_active(struct pvcalls_bedata *bedata,
+					 struct sock_mapping *map)
 {
 	int i;
 
 	unbind_from_irqhandler(map->active.irq, map);
 
-	spin_lock(&bedata->socket_lock);
-	if (!list_empty(&map->list))
-		list_del_init(&map->list);
-	spin_unlock(&bedata->socket_lock);
+	if (bedata) {
+		spin_lock(&bedata->socket_lock);
+		if (!list_empty(&map->list))
+			list_del_init(&map->list);
+		spin_unlock(&bedata->socket_lock);
+	}
 
 	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
 		gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
 	gnttab_end_foreign_access(map->active.ref, NULL);
 	free_active_ring(map);
+}
+
+static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+				   struct sock_mapping *map)
+{
+	pvcalls_front_destroy_active(bedata, map);
 
 	kfree(map);
 }
@@ -433,19 +441,18 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
+	ret = create_active(map, &evtchn);
+	if (ret < 0) {
+		free_active_ring(map);
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
-		free_active_ring(map);
-		pvcalls_exit_sock(sock);
-		return ret;
-	}
-	ret = create_active(map, &evtchn);
-	if (ret < 0) {
-		spin_unlock(&bedata->socket_lock);
-		free_active_ring(map);
+		pvcalls_front_destroy_active(NULL, map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
@@ -821,28 +828,27 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
-	spin_lock(&bedata->socket_lock);
-	ret = get_request(bedata, &req_id);
-	if (ret < 0) {
-		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
-			  (void *)&map->passive.flags);
-		spin_unlock(&bedata->socket_lock);
-		free_active_ring(map2);
-		kfree(map2);
-		pvcalls_exit_sock(sock);
-		return ret;
-	}
 
 	ret = create_active(map2, &evtchn);
 	if (ret < 0) {
 		free_active_ring(map2);
 		kfree(map2);
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
-		spin_unlock(&bedata->socket_lock);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
+
+	spin_lock(&bedata->socket_lock);
+	ret = get_request(bedata, &req_id);
+	if (ret < 0) {
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+			  (void *)&map->passive.flags);
+		spin_unlock(&bedata->socket_lock);
+		pvcalls_front_free_map(bedata, map2);
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 	list_add_tail(&map2->list, &bedata->socket_mappings);
 
 	req = RING_GET_REQUEST(&bedata->ring, req_id);