eventfd: simplify eventfd_signal()
Ever since the eventfd type was introduced back in 2007 in commit
e1ad7468c7 ("signal/timer/event: eventfd core"), callers of
eventfd_signal() have only ever passed 1 as the value for @n. There's
no point in keeping that additional argument.
Link: https://lore.kernel.org/r/20231122-vfs-eventfd-signal-v2-2-bd549b14ce0c@kernel.org
Acked-by: Xu Yilun <yilun.xu@intel.com>
Acked-by: Andrew Donnellan <ajd@linux.ibm.com> # ocxl
Acked-by: Eric Farman <farman@linux.ibm.com> # s390
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 8588487192
commit 3652117f85
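As an illustration of the caller-side conversion, here is a minimal sketch; the handler and variable names below are hypothetical and not taken from this commit, only the eventfd_signal() calls reflect the interface change:

/*
 * Hypothetical driver irq handler, before and after this commit.
 */
#include <linux/eventfd.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq_handler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	/* before: eventfd_signal(trigger, 1); callers always passed 1 */
	eventfd_signal(trigger);	/* after: the counter is incremented by 1 implicitly */

	return IRQ_HANDLED;
}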
@@ -2388,7 +2388,7 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *h
 	if (!eventfd)
 		return HV_STATUS_INVALID_PORT_ID;
 
-	eventfd_signal(eventfd, 1);
+	eventfd_signal(eventfd);
 	return HV_STATUS_SUCCESS;
 }
 
@@ -2088,7 +2088,7 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
 		if (ret < 0 && ret != -ENOTCONN)
 			return false;
 	} else {
-		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
+		eventfd_signal(evtchnfd->deliver.eventfd.ctx);
 	}
 
 	*r = 0;
@@ -2044,7 +2044,7 @@ static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64
 	notifier_event->events_mask |= event_mask;
 
 	if (notifier_event->eventfd)
-		eventfd_signal(notifier_event->eventfd, 1);
+		eventfd_signal(notifier_event->eventfd);
 
 	mutex_unlock(&notifier_event->lock);
 }
@@ -1872,7 +1872,7 @@ static irqreturn_t dfl_irq_handler(int irq, void *arg)
 {
 	struct eventfd_ctx *trigger = arg;
 
-	eventfd_signal(trigger, 1);
+	eventfd_signal(trigger);
 	return IRQ_HANDLED;
 }
 
@@ -1365,7 +1365,7 @@ static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
 	struct syncobj_eventfd_entry *entry =
 		container_of(cb, struct syncobj_eventfd_entry, fence_cb);
 
-	eventfd_signal(entry->ev_fd_ctx, 1);
+	eventfd_signal(entry->ev_fd_ctx);
 	syncobj_eventfd_entry_free(entry);
 }
 
@@ -1388,13 +1388,13 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
 	entry->fence = fence;
 
 	if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
-		eventfd_signal(entry->ev_fd_ctx, 1);
+		eventfd_signal(entry->ev_fd_ctx);
 		syncobj_eventfd_entry_free(entry);
 	} else {
 		ret = dma_fence_add_callback(fence, &entry->fence_cb,
 					     syncobj_eventfd_entry_fence_func);
 		if (ret == -ENOENT) {
-			eventfd_signal(entry->ev_fd_ctx, 1);
+			eventfd_signal(entry->ev_fd_ctx);
 			syncobj_eventfd_entry_free(entry);
 		}
 	}
@@ -453,7 +453,7 @@ static void inject_virtual_interrupt(struct intel_vgpu *vgpu)
 	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
 		return;
 	if (vgpu->msi_trigger)
-		eventfd_signal(vgpu->msi_trigger, 1);
+		eventfd_signal(vgpu->msi_trigger);
 }
 
 static void propagate_event(struct intel_gvt_irq *irq,
@@ -2498,7 +2498,7 @@ static void dispatch_event_fd(struct list_head *fd_list,
 
 	list_for_each_entry_rcu(item, fd_list, xa_list) {
 		if (item->eventfd)
-			eventfd_signal(item->eventfd, 1);
+			eventfd_signal(item->eventfd);
 		else
 			deliver_event(item, data);
 	}
@@ -184,7 +184,7 @@ static irqreturn_t irq_handler(void *private)
 {
 	struct eventfd_ctx *ev_ctx = private;
 
-	eventfd_signal(ev_ctx, 1);
+	eventfd_signal(ev_ctx);
 	return IRQ_HANDLED;
 }
 
@@ -115,7 +115,7 @@ static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
 
 	/* Notify the guest if more CRWs are on our queue */
 	if (!list_empty(&private->crw) && private->crw_trigger)
-		eventfd_signal(private->crw_trigger, 1);
+		eventfd_signal(private->crw_trigger);
 
 	return ret;
 }
@@ -112,7 +112,7 @@ void vfio_ccw_sch_io_todo(struct work_struct *work)
 		private->state = VFIO_CCW_STATE_IDLE;
 
 	if (private->io_trigger)
-		eventfd_signal(private->io_trigger, 1);
+		eventfd_signal(private->io_trigger);
 }
 
 void vfio_ccw_crw_todo(struct work_struct *work)
@@ -122,7 +122,7 @@ void vfio_ccw_crw_todo(struct work_struct *work)
 	private = container_of(work, struct vfio_ccw_private, crw_work);
 
 	if (!list_empty(&private->crw) && private->crw_trigger)
-		eventfd_signal(private->crw_trigger, 1);
+		eventfd_signal(private->crw_trigger);
 }
 
 /*
@@ -421,7 +421,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
 	case VFIO_IRQ_SET_DATA_NONE:
 	{
 		if (*ctx)
-			eventfd_signal(*ctx, 1);
+			eventfd_signal(*ctx);
 		return 0;
 	}
 	case VFIO_IRQ_SET_DATA_BOOL:
@@ -432,7 +432,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
 			return -EFAULT;
 
 		if (trigger && *ctx)
-			eventfd_signal(*ctx, 1);
+			eventfd_signal(*ctx);
 		return 0;
 	}
 	case VFIO_IRQ_SET_DATA_EVENTFD:
@@ -612,7 +612,7 @@ static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
 			   "Relaying device request to user (#%u)\n",
 			   count);
 
-		eventfd_signal(private->req_trigger, 1);
+		eventfd_signal(private->req_trigger);
 	} else if (count == 0) {
 		dev_notice(dev,
 			   "No device request channel registered, blocked until released by user\n");
@@ -1794,7 +1794,7 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
 			   "Relaying device request to user (#%u)\n",
 			   count);
 
-		eventfd_signal(matrix_mdev->req_trigger, 1);
+		eventfd_signal(matrix_mdev->req_trigger);
 	} else if (count == 0) {
 		dev_notice(dev,
 			   "No device request registered, blocked until released by user\n");
@@ -831,7 +831,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
 		io_data->kiocb->ki_complete(io_data->kiocb, ret);
 
 	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
-		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+		eventfd_signal(io_data->ffs->ffs_eventfd);
 
 	if (io_data->read)
 		kfree(io_data->to_free);
@@ -2738,7 +2738,7 @@ static void __ffs_event_add(struct ffs_data *ffs,
 	ffs->ev.types[ffs->ev.count++] = type;
 	wake_up_locked(&ffs->ev.waitq);
 	if (ffs->ffs_eventfd)
-		eventfd_signal(ffs->ffs_eventfd, 1);
+		eventfd_signal(ffs->ffs_eventfd);
 }
 
 static void ffs_event_add(struct ffs_data *ffs,
@@ -493,7 +493,7 @@ static void vduse_vq_kick(struct vduse_virtqueue *vq)
 		goto unlock;
 
 	if (vq->kickfd)
-		eventfd_signal(vq->kickfd, 1);
+		eventfd_signal(vq->kickfd);
 	else
 		vq->kicked = true;
 unlock:
@@ -911,7 +911,7 @@ static int vduse_kickfd_setup(struct vduse_dev *dev,
 		eventfd_ctx_put(vq->kickfd);
 	vq->kickfd = ctx;
 	if (vq->ready && vq->kicked && vq->kickfd) {
-		eventfd_signal(vq->kickfd, 1);
+		eventfd_signal(vq->kickfd);
 		vq->kicked = false;
 	}
 	spin_unlock(&vq->kick_lock);
@@ -960,7 +960,7 @@ static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
 
 	spin_lock_irq(&vq->irq_lock);
 	if (vq->ready && vq->cb.trigger) {
-		eventfd_signal(vq->cb.trigger, 1);
+		eventfd_signal(vq->cb.trigger);
 		signal = true;
 	}
 	spin_unlock_irq(&vq->irq_lock);
@@ -54,7 +54,7 @@ static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
 {
 	struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
 
-	eventfd_signal(mc_irq->trigger, 1);
+	eventfd_signal(mc_irq->trigger);
 	return IRQ_HANDLED;
 }
 
@@ -443,7 +443,7 @@ static int vfio_pci_core_runtime_resume(struct device *dev)
 	 */
 	down_write(&vdev->memory_lock);
 	if (vdev->pm_wake_eventfd_ctx) {
-		eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
+		eventfd_signal(vdev->pm_wake_eventfd_ctx);
 		__vfio_pci_runtime_pm_exit(vdev);
 	}
 	up_write(&vdev->memory_lock);
@@ -1883,7 +1883,7 @@ void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
 		pci_notice_ratelimited(pdev,
 				       "Relaying device request to user (#%u)\n",
 				       count);
-		eventfd_signal(vdev->req_trigger, 1);
+		eventfd_signal(vdev->req_trigger);
 	} else if (count == 0) {
 		pci_warn(pdev,
 			 "No device request channel registered, blocked until released by user\n");
@@ -2302,7 +2302,7 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
 	mutex_lock(&vdev->igate);
 
 	if (vdev->err_trigger)
-		eventfd_signal(vdev->err_trigger, 1);
+		eventfd_signal(vdev->err_trigger);
 
 	mutex_unlock(&vdev->igate);
 
@@ -94,7 +94,7 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
 		ctx = vfio_irq_ctx_get(vdev, 0);
 		if (WARN_ON_ONCE(!ctx))
 			return;
-		eventfd_signal(ctx->trigger, 1);
+		eventfd_signal(ctx->trigger);
 	}
 }
 
@@ -342,7 +342,7 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
 {
 	struct eventfd_ctx *trigger = arg;
 
-	eventfd_signal(trigger, 1);
+	eventfd_signal(trigger);
 	return IRQ_HANDLED;
 }
 
@@ -689,11 +689,11 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
 		if (!ctx)
 			continue;
 		if (flags & VFIO_IRQ_SET_DATA_NONE) {
-			eventfd_signal(ctx->trigger, 1);
+			eventfd_signal(ctx->trigger);
 		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
 			uint8_t *bools = data;
 			if (bools[i - start])
-				eventfd_signal(ctx->trigger, 1);
+				eventfd_signal(ctx->trigger);
 		}
 	}
 	return 0;
@@ -707,7 +707,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
 	if (flags & VFIO_IRQ_SET_DATA_NONE) {
 		if (*ctx) {
 			if (count) {
-				eventfd_signal(*ctx, 1);
+				eventfd_signal(*ctx);
 			} else {
 				eventfd_ctx_put(*ctx);
 				*ctx = NULL;
@@ -722,7 +722,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
 
 		trigger = *(uint8_t *)data;
 		if (trigger && *ctx)
-			eventfd_signal(*ctx, 1);
+			eventfd_signal(*ctx);
 
 		return 0;
 	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
@@ -155,7 +155,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
 	spin_unlock_irqrestore(&irq_ctx->lock, flags);
 
 	if (ret == IRQ_HANDLED)
-		eventfd_signal(irq_ctx->trigger, 1);
+		eventfd_signal(irq_ctx->trigger);
 
 	return ret;
 }
@@ -164,7 +164,7 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
 {
 	struct vfio_platform_irq *irq_ctx = dev_id;
 
-	eventfd_signal(irq_ctx->trigger, 1);
+	eventfd_signal(irq_ctx->trigger);
 
 	return IRQ_HANDLED;
 }
@@ -178,7 +178,7 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
 	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
 
 	if (call_ctx)
-		eventfd_signal(call_ctx, 1);
+		eventfd_signal(call_ctx);
 
 	return IRQ_HANDLED;
 }
@@ -189,7 +189,7 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
 	struct eventfd_ctx *config_ctx = v->config_ctx;
 
 	if (config_ctx)
-		eventfd_signal(config_ctx, 1);
+		eventfd_signal(config_ctx);
 
 	return IRQ_HANDLED;
 }
@@ -2248,7 +2248,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		len -= l;
 		if (!len) {
 			if (vq->log_ctx)
-				eventfd_signal(vq->log_ctx, 1);
+				eventfd_signal(vq->log_ctx);
 			return 0;
 		}
 	}
@@ -2271,7 +2271,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 		log_used(vq, (used - (void __user *)vq->used),
 			 sizeof vq->used->flags);
 		if (vq->log_ctx)
-			eventfd_signal(vq->log_ctx, 1);
+			eventfd_signal(vq->log_ctx);
 	}
 	return 0;
 }
@@ -2289,7 +2289,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq)
 		log_used(vq, (used - (void __user *)vq->used),
 			 sizeof *vhost_avail_event(vq));
 		if (vq->log_ctx)
-			eventfd_signal(vq->log_ctx, 1);
+			eventfd_signal(vq->log_ctx);
 	}
 	return 0;
 }
@@ -2715,7 +2715,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 		log_used(vq, offsetof(struct vring_used, idx),
 			 sizeof vq->used->idx);
 		if (vq->log_ctx)
-			eventfd_signal(vq->log_ctx, 1);
+			eventfd_signal(vq->log_ctx);
 	}
 	return r;
 }
@@ -2763,7 +2763,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	/* Signal the Guest tell them we used something up. */
 	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
-		eventfd_signal(vq->call_ctx.ctx, 1);
+		eventfd_signal(vq->call_ctx.ctx);
 }
 EXPORT_SYMBOL_GPL(vhost_signal);
 
@@ -249,7 +249,7 @@ void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
 #define vq_err(vq, fmt, ...) do { \
 		pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
 		if ((vq)->error_ctx) \
-			eventfd_signal((vq)->error_ctx, 1);\
+			eventfd_signal((vq)->error_ctx);\
 	} while (0)
 
 enum {
@@ -223,7 +223,7 @@ static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
 	mutex_lock(&client->vm->ioeventfds_lock);
 	p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
 	if (p)
-		eventfd_signal(p->eventfd, 1);
+		eventfd_signal(p->eventfd);
 	mutex_unlock(&client->vm->ioeventfds_lock);
 
 	return 0;
@@ -1147,7 +1147,7 @@ static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
 		if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
 		    ioreq->size == kioeventfd->addr_len &&
 		    (ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
-			eventfd_signal(kioeventfd->eventfd, 1);
+			eventfd_signal(kioeventfd->eventfd);
 			state = STATE_IORESP_READY;
 			break;
 		}
fs/aio.c (2 changed lines)
@@ -1166,7 +1166,7 @@ static void aio_complete(struct aio_kiocb *iocb)
 	 * from IRQ context.
 	 */
 	if (iocb->ki_eventfd)
-		eventfd_signal(iocb->ki_eventfd, 1);
+		eventfd_signal(iocb->ki_eventfd);
 
 	/*
 	 * We have to order our ring_info tail store above and test
fs/eventfd.c (11 changed lines)
@@ -72,22 +72,19 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
 }
 
 /**
- * eventfd_signal - Adds @n to the eventfd counter.
+ * eventfd_signal - Increment the event counter
  * @ctx: [in] Pointer to the eventfd context.
- * @n: [in] Value of the counter to be added to the eventfd internal counter.
- *          The value cannot be negative.
  *
  * This function is supposed to be called by the kernel in paths that do not
  * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
  * value, and we signal this as overflow condition by returning a EPOLLERR
  * to poll(2).
  *
- * Returns the amount by which the counter was incremented. This will be less
- * than @n if the counter has overflowed.
+ * Returns the amount by which the counter was incremented.
  */
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+__u64 eventfd_signal(struct eventfd_ctx *ctx)
 {
-	return eventfd_signal_mask(ctx, n, 0);
+	return eventfd_signal_mask(ctx, 1, 0);
 }
 EXPORT_SYMBOL_GPL(eventfd_signal);
 
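For context, each eventfd_signal() call still adds exactly 1 to the eventfd's 64-bit counter, which userspace observes with read(2). A minimal userspace sketch of that contract (standard eventfd(2) usage, not part of this commit):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	uint64_t cnt;

	if (efd < 0)
		return 1;

	/* userspace equivalent of one signal: add 1 to the counter */
	eventfd_write(efd, 1);

	/* read(2) returns the accumulated count and resets it to zero */
	if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
		printf("counter: %llu\n", (unsigned long long)cnt);

	close(efd);
	return 0;
}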
@@ -35,7 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
 struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+__u64 eventfd_signal(struct eventfd_ctx *ctx);
 __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
@@ -58,7 +58,7 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
 	return ERR_PTR(-ENOSYS);
 }
 
-static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+static inline int eventfd_signal(struct eventfd_ctx *ctx)
 {
 	return -ENOSYS;
 }
@@ -4378,7 +4378,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 	 * only one element of the array here.
 	 */
 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
-		eventfd_signal(t->entries[i].eventfd, 1);
+		eventfd_signal(t->entries[i].eventfd);
 
 	/* i = current_threshold + 1 */
 	i++;
@@ -4390,7 +4390,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 	 * only one element of the array here.
 	 */
 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
-		eventfd_signal(t->entries[i].eventfd, 1);
+		eventfd_signal(t->entries[i].eventfd);
 
 	/* Update current_threshold */
 	t->current_threshold = i - 1;
@@ -4430,7 +4430,7 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
 	spin_lock(&memcg_oom_lock);
 
 	list_for_each_entry(ev, &memcg->oom_notify, list)
-		eventfd_signal(ev->eventfd, 1);
+		eventfd_signal(ev->eventfd);
 
 	spin_unlock(&memcg_oom_lock);
 	return 0;
@@ -4649,7 +4649,7 @@ static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
 
 	/* already in OOM ? */
 	if (memcg->under_oom)
-		eventfd_signal(eventfd, 1);
+		eventfd_signal(eventfd);
 	spin_unlock(&memcg_oom_lock);
 
 	return 0;
@@ -4941,7 +4941,7 @@ static void memcg_event_remove(struct work_struct *work)
 	event->unregister_event(memcg, event->eventfd);
 
 	/* Notify userspace the event is going away. */
-	eventfd_signal(event->eventfd, 1);
+	eventfd_signal(event->eventfd);
 
 	eventfd_ctx_put(event->eventfd);
 	kfree(event);
@@ -169,7 +169,7 @@ static bool vmpressure_event(struct vmpressure *vmpr,
 			continue;
 		if (level < ev->level)
 			continue;
-		eventfd_signal(ev->efd, 1);
+		eventfd_signal(ev->efd);
 		ret = true;
 	}
 	mutex_unlock(&vmpr->events_lock);
@@ -234,10 +234,10 @@ static void mtty_trigger_interrupt(struct mdev_state *mdev_state)
 
 	if (is_msi(mdev_state)) {
 		if (mdev_state->msi_evtfd)
-			eventfd_signal(mdev_state->msi_evtfd, 1);
+			eventfd_signal(mdev_state->msi_evtfd);
 	} else if (is_intx(mdev_state)) {
 		if (mdev_state->intx_evtfd && !mdev_state->intx_mask) {
-			eventfd_signal(mdev_state->intx_evtfd, 1);
+			eventfd_signal(mdev_state->intx_evtfd);
 			mdev_state->intx_mask = true;
 		}
 	}
@@ -61,7 +61,7 @@ static void irqfd_resampler_notify(struct kvm_kernel_irqfd_resampler *resampler)
 
 	list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
 				 srcu_read_lock_held(&resampler->kvm->irq_srcu))
-		eventfd_signal(irqfd->resamplefd, 1);
+		eventfd_signal(irqfd->resamplefd);
 }
 
 /*
@@ -786,7 +786,7 @@ ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
 	if (!ioeventfd_in_range(p, addr, len, val))
 		return -EOPNOTSUPP;
 
-	eventfd_signal(p->eventfd, 1);
+	eventfd_signal(p->eventfd);
 	return 0;
 }
 