2019-05-27 06:55:05 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2010-01-18 13:44:55 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
|
2011-01-31 07:36:37 +00:00
|
|
|
* Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
|
|
|
|
* Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
|
2007-10-22 01:03:39 +00:00
|
|
|
*/
|
2009-12-21 16:06:04 +00:00
|
|
|
#include <linux/cdev.h>
|
2009-12-21 17:06:21 +00:00
|
|
|
#include <linux/debugfs.h>
|
2011-09-22 18:14:23 +00:00
|
|
|
#include <linux/completion.h>
|
2009-12-21 16:06:04 +00:00
|
|
|
#include <linux/device.h>
|
2007-10-22 01:03:39 +00:00
|
|
|
#include <linux/err.h>
|
2011-09-14 07:36:41 +00:00
|
|
|
#include <linux/freezer.h>
|
2009-12-21 16:19:30 +00:00
|
|
|
#include <linux/fs.h>
|
2012-08-09 12:30:39 +00:00
|
|
|
#include <linux/splice.h>
|
|
|
|
#include <linux/pagemap.h>
|
2022-11-22 13:46:43 +00:00
|
|
|
#include <linux/idr.h>
|
2007-10-22 01:03:39 +00:00
|
|
|
#include <linux/init.h>
|
2010-01-18 13:45:05 +00:00
|
|
|
#include <linux/list.h>
|
2009-12-21 16:19:30 +00:00
|
|
|
#include <linux/poll.h>
|
|
|
|
#include <linux/sched.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 08:04:11 +00:00
|
|
|
#include <linux/slab.h>
|
2010-01-18 13:45:05 +00:00
|
|
|
#include <linux/spinlock.h>
|
2007-10-22 01:03:39 +00:00
|
|
|
#include <linux/virtio.h>
|
|
|
|
#include <linux/virtio_console.h>
|
2009-12-21 16:19:30 +00:00
|
|
|
#include <linux/wait.h>
|
2009-12-21 15:33:25 +00:00
|
|
|
#include <linux/workqueue.h>
|
2011-07-03 17:35:48 +00:00
|
|
|
#include <linux/module.h>
|
2012-12-14 04:10:51 +00:00
|
|
|
#include <linux/dma-mapping.h>
|
2011-02-01 04:01:25 +00:00
|
|
|
#include "../tty/hvc/hvc_console.h"
|
2007-10-22 01:03:39 +00:00
|
|
|
|
2012-12-14 04:10:51 +00:00
|
|
|
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
|
2021-10-19 07:01:44 +00:00
|
|
|
#define VIRTCONS_MAX_PORTS 0x8000
|
2012-12-14 04:10:51 +00:00
|
|
|
|
2010-01-18 13:45:05 +00:00
|
|
|
/*
 * This is a global struct for storing common data for all the devices
 * this driver handles.
 *
 * Mainly, it has a linked list for all the consoles in one place so
 * that callbacks from hvc for get_chars(), put_chars() work properly
 * across multiple devices and multiple ports per device.
 */
struct ports_driver_data {
	/* Used for exporting per-port information to debugfs */
	struct dentry *debugfs_dir;

	/* List of all the devices we're handling */
	struct list_head portdevs;

	/* All the console devices handled by this driver */
	struct list_head consoles;
};
|
2022-11-22 13:46:43 +00:00
|
|
|
|
|
|
|
static struct ports_driver_data pdrvdata;
|
2010-01-18 13:45:05 +00:00
|
|
|
|
2023-06-20 14:37:58 +00:00
|
|
|
static const struct class port_class = {
|
|
|
|
.name = "virtio-ports",
|
|
|
|
};
|
|
|
|
|
2013-04-08 06:43:59 +00:00
|
|
|
static DEFINE_SPINLOCK(pdrvdata_lock);
|
|
|
|
static DECLARE_COMPLETION(early_console_added);
|
2010-01-18 13:45:05 +00:00
|
|
|
|
2010-01-18 13:45:09 +00:00
|
|
|
/* This struct holds information that's relevant only for console ports */
struct console {
	/* We'll place all consoles in a list in the pdrvdata struct */
	struct list_head list;

	/* The hvc device associated with this console port */
	struct hvc_struct *hvc;

	/* The size of the console */
	struct winsize ws;

	/*
	 * This number identifies the number that we used to register
	 * with hvc in hvc_instantiate() and hvc_alloc(); this is the
	 * number passed on by the hvc callbacks to us to
	 * differentiate between the other console ports handled by
	 * this driver
	 */
	u32 vtermno;
};
|
|
|
|
|
2022-11-22 13:46:43 +00:00
|
|
|
static DEFINE_IDA(vtermno_ida);
|
|
|
|
|
2010-01-18 13:45:01 +00:00
|
|
|
/*
 * A buffer used for input or output on a port's virtqueues.  Either
 * 'buf' (a plain or DMA-coherent allocation) or the trailing 'sg'
 * array is used, depending on 'sgpages' (see alloc_buf()).
 */
struct port_buffer {
	char *buf;

	/* size of the buffer in *buf above */
	size_t size;

	/* used length of the buffer */
	size_t len;
	/* offset in the buf from which to consume data */
	size_t offset;

	/* DMA address of buffer */
	dma_addr_t dma;

	/* Device we got DMA memory from */
	struct device *dev;

	/* List of pending dma buffers to free */
	struct list_head list;

	/* If sgpages == 0 then buf is used */
	unsigned int sgpages;

	/* sg is used if spages > 0. sg must be the last in is struct */
	struct scatterlist sg[] __counted_by(sgpages);
};
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
/*
 * This is a per-device struct that stores data common to all the
 * ports for that device (vdev->priv).
 */
struct ports_device {
	/* Next portdev in the list, head is in the pdrvdata struct */
	struct list_head list;

	/*
	 * Workqueue handlers where we process deferred work after
	 * notification
	 */
	struct work_struct control_work;
	struct work_struct config_work;

	struct list_head ports;

	/* To protect the list of ports */
	spinlock_t ports_lock;

	/* To protect the vq operations for the control channel */
	spinlock_t c_ivq_lock;
	spinlock_t c_ovq_lock;

	/* max. number of ports this device can hold */
	u32 max_nr_ports;

	/* The virtio device we're associated with */
	struct virtio_device *vdev;

	/*
	 * A couple of virtqueues for the control channel: one for
	 * guest->host transfers, one for host->guest transfers
	 */
	struct virtqueue *c_ivq, *c_ovq;

	/*
	 * A control packet buffer for guest->host requests, protected
	 * by c_ovq_lock.
	 */
	struct virtio_console_control cpkt;

	/* Array of per-port IO virtqueues */
	struct virtqueue **in_vqs, **out_vqs;

	/* Major number for this device.  Ports will be created as minors. */
	int chr_major;
};
|
|
|
|
|
2011-09-14 07:36:46 +00:00
|
|
|
/* Byte counters for a port; exposed for accounting/debugging (see struct port) */
struct port_stats {
	unsigned long bytes_sent, bytes_received, bytes_discarded;
};
|
|
|
|
|
2010-01-18 13:45:07 +00:00
|
|
|
/* This struct holds the per-port data */
struct port {
	/* Next port in the list, head is in the ports_device */
	struct list_head list;

	/* Pointer to the parent virtio_console device */
	struct ports_device *portdev;

	/* The current buffer from which data has to be fed to readers */
	struct port_buffer *inbuf;

	/*
	 * To protect the operations on the in_vq associated with this
	 * port.  Has to be a spinlock because it can be called from
	 * interrupt context (get_char()).
	 */
	spinlock_t inbuf_lock;

	/* Protect the operations on the out_vq. */
	spinlock_t outvq_lock;

	/* The IO vqs for this port */
	struct virtqueue *in_vq, *out_vq;

	/* File in the debugfs directory that exposes this port's information */
	struct dentry *debugfs_file;

	/*
	 * Keep count of the bytes sent, received and discarded for
	 * this port for accounting and debugging purposes.  These
	 * counts are not reset across port open / close events.
	 */
	struct port_stats stats;

	/*
	 * The entries in this struct will be valid if this port is
	 * hooked up to an hvc console
	 */
	struct console cons;

	/* Each port associates with a separate char device */
	struct cdev *cdev;
	struct device *dev;

	/* Reference-counting to handle port hot-unplugs and file operations */
	struct kref kref;

	/* A waitqueue for poll() or blocking read operations */
	wait_queue_head_t waitqueue;

	/* The 'name' of the port that we expose via sysfs properties */
	char *name;

	/* We can notify apps of host connect / disconnect events via SIGIO */
	struct fasync_struct *async_queue;

	/* The 'id' to identify the port with the Host */
	u32 id;

	/*
	 * Set when out_vq runs out of free descriptors; cleared when
	 * consumed buffers are reclaimed (see reclaim_consumed_buffers())
	 */
	bool outvq_full;

	/* Is the host device open */
	bool host_connected;

	/* We should allow only one process to open a port */
	bool guest_connected;
};
|
2007-10-22 01:03:39 +00:00
|
|
|
|
2010-01-18 13:45:05 +00:00
|
|
|
/*
 * Look up the port whose console is registered under hvc terminal
 * number 'vtermno'.  Returns NULL if no such console exists.
 */
static struct port *find_port_by_vtermno(u32 vtermno)
{
	struct port *found = NULL;
	struct console *cons;
	unsigned long flags;

	spin_lock_irqsave(&pdrvdata_lock, flags);
	list_for_each_entry(cons, &pdrvdata.consoles, list) {
		if (cons->vtermno == vtermno) {
			found = container_of(cons, struct port, cons);
			break;
		}
	}
	spin_unlock_irqrestore(&pdrvdata_lock, flags);
	return found;
}
|
|
|
|
|
2010-09-02 12:50:58 +00:00
|
|
|
static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
|
|
|
|
dev_t dev)
|
|
|
|
{
|
|
|
|
struct port *port;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&portdev->ports_lock, flags);
|
2013-07-29 04:46:13 +00:00
|
|
|
list_for_each_entry(port, &portdev->ports, list) {
|
|
|
|
if (port->cdev->dev == dev) {
|
|
|
|
kref_get(&port->kref);
|
2010-09-02 12:50:58 +00:00
|
|
|
goto out;
|
2013-07-29 04:46:13 +00:00
|
|
|
}
|
|
|
|
}
|
2010-09-02 12:50:58 +00:00
|
|
|
port = NULL;
|
|
|
|
out:
|
|
|
|
spin_unlock_irqrestore(&portdev->ports_lock, flags);
|
|
|
|
|
|
|
|
return port;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Walk every known ports_device looking for the port that owns char
 * device 'dev'.  Returns a referenced port (via
 * find_port_by_devt_in_portdev()) or NULL.
 */
static struct port *find_port_by_devt(dev_t dev)
{
	struct ports_device *portdev;
	struct port *port = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pdrvdata_lock, flags);
	list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
		port = find_port_by_devt_in_portdev(portdev, dev);
		if (port)
			break;
	}
	spin_unlock_irqrestore(&pdrvdata_lock, flags);
	return port;
}
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
/*
 * Find the port with host-assigned 'id' on this device, or NULL.
 * No reference is taken.
 */
static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
{
	struct port *found = NULL;
	struct port *iter;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(iter, &portdev->ports, list) {
		if (iter->id == id) {
			found = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&portdev->ports_lock, flags);

	return found;
}
|
|
|
|
|
2010-01-18 13:45:12 +00:00
|
|
|
static struct port *find_port_by_vq(struct ports_device *portdev,
|
|
|
|
struct virtqueue *vq)
|
|
|
|
{
|
|
|
|
struct port *port;
|
|
|
|
unsigned long flags;
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
spin_lock_irqsave(&portdev->ports_lock, flags);
|
|
|
|
list_for_each_entry(port, &portdev->ports, list)
|
2010-01-18 13:45:12 +00:00
|
|
|
if (port->in_vq == vq || port->out_vq == vq)
|
|
|
|
goto out;
|
|
|
|
port = NULL;
|
|
|
|
out:
|
2009-12-21 15:33:25 +00:00
|
|
|
spin_unlock_irqrestore(&portdev->ports_lock, flags);
|
2010-01-18 13:45:12 +00:00
|
|
|
return port;
|
|
|
|
}
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
static bool is_console_port(struct port *port)
|
|
|
|
{
|
|
|
|
if (port->cons.hvc)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-12-14 04:10:51 +00:00
|
|
|
static bool is_rproc_serial(const struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
|
|
|
|
}
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
static inline bool use_multiport(struct ports_device *portdev)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* This condition can be true when put_chars is called from
|
|
|
|
* early_init
|
|
|
|
*/
|
|
|
|
if (!portdev->vdev)
|
2015-03-30 23:46:10 +00:00
|
|
|
return false;
|
2014-10-07 14:39:42 +00:00
|
|
|
return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
|
2009-12-21 15:33:25 +00:00
|
|
|
}
|
|
|
|
|
2012-12-14 04:10:51 +00:00
|
|
|
static DEFINE_SPINLOCK(dma_bufs_lock);
|
|
|
|
static LIST_HEAD(pending_free_dma_bufs);
|
|
|
|
|
|
|
|
/*
 * Free a port_buffer: drop any pages referenced by its sg list, then
 * release the data buffer and the struct itself.
 *
 * @can_sleep: false when the caller is in atomic context.  DMA-coherent
 * buffers cannot be freed there (dma_free_coherent() requires interrupts
 * enabled), so they are queued on pending_free_dma_bufs and freed later
 * by reclaim_dma_bufs().
 */
static void free_buf(struct port_buffer *buf, bool can_sleep)
{
	unsigned int i;

	/* Drop references on any pages attached to the sg entries */
	for (i = 0; i < buf->sgpages; i++) {
		struct page *page = sg_page(&buf->sg[i]);
		if (!page)
			break;
		put_page(page);
	}

	if (!buf->dev) {
		/* Plain kmalloc'd data buffer */
		kfree(buf->buf);
	} else if (is_rproc_enabled) {
		unsigned long flags;

		/* dma_free_coherent requires interrupts to be enabled. */
		if (!can_sleep) {
			/* queue up dma-buffers to be freed later */
			spin_lock_irqsave(&dma_bufs_lock, flags);
			list_add_tail(&buf->list, &pending_free_dma_bufs);
			spin_unlock_irqrestore(&dma_bufs_lock, flags);
			return;
		}
		dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);

		/* Release device refcnt and allow it to be freed */
		put_device(buf->dev);
	}

	kfree(buf);
}
|
|
|
|
|
2012-12-14 04:10:51 +00:00
|
|
|
/*
 * Free the DMA buffers that free_buf() deferred because it ran in
 * atomic context.  Must be called from a context where sleeping /
 * enabled interrupts are allowed (see free_buf(buf, true) below).
 */
static void reclaim_dma_bufs(void)
{
	unsigned long flags;
	struct port_buffer *buf, *tmp;
	LIST_HEAD(tmp_list);

	if (list_empty(&pending_free_dma_bufs))
		return;

	/* Create a copy of the pending_free_dma_bufs while holding the lock */
	spin_lock_irqsave(&dma_bufs_lock, flags);
	list_cut_position(&tmp_list, &pending_free_dma_bufs,
			  pending_free_dma_bufs.prev);
	spin_unlock_irqrestore(&dma_bufs_lock, flags);

	/* Release the dma buffers, without irqs enabled */
	list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
		list_del(&buf->list);
		free_buf(buf, true);
	}
}
|
|
|
|
|
2018-04-20 16:54:23 +00:00
|
|
|
/*
 * Allocate a port_buffer.
 *
 * If @pages > 0, only the struct plus a @pages-entry sg array is
 * allocated (buf/dev stay NULL) and the caller fills in the sg list.
 * Otherwise a @buf_size data buffer is allocated: DMA-coherent memory
 * for rproc serial devices, plain kmalloc memory for everything else.
 *
 * Returns NULL on allocation failure.
 */
static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
				     int pages)
{
	struct port_buffer *buf;

	/* Opportunistically free DMA buffers deferred by free_buf() */
	reclaim_dma_bufs();

	/*
	 * Allocate buffer and the sg list. The sg list array is allocated
	 * directly after the port_buffer struct.
	 */
	buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL);
	if (!buf)
		goto fail;

	buf->sgpages = pages;
	if (pages > 0) {
		buf->dev = NULL;
		buf->buf = NULL;
		return buf;
	}

	if (is_rproc_serial(vdev)) {
		/*
		 * Allocate DMA memory from ancestor. When a virtio
		 * device is created by remoteproc, the DMA memory is
		 * associated with the parent device:
		 * virtioY => remoteprocX#vdevYbuffer.
		 */
		buf->dev = vdev->dev.parent;
		if (!buf->dev)
			goto free_buf;

		/* Increase device refcnt to avoid freeing it */
		get_device(buf->dev);
		buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
					      GFP_KERNEL);
	} else {
		buf->dev = NULL;
		buf->buf = kmalloc(buf_size, GFP_KERNEL);
	}

	if (!buf->buf)
		goto free_buf;
	buf->len = 0;
	buf->offset = 0;
	buf->size = buf_size;
	return buf;

free_buf:
	kfree(buf);
fail:
	return NULL;
}
|
|
|
|
|
2010-01-18 13:45:03 +00:00
|
|
|
/* Callers should take appropriate locks */
|
2011-09-14 07:36:42 +00:00
|
|
|
static struct port_buffer *get_inbuf(struct port *port)
|
2010-01-18 13:45:03 +00:00
|
|
|
{
|
|
|
|
struct port_buffer *buf;
|
|
|
|
unsigned int len;
|
|
|
|
|
2011-09-14 07:36:43 +00:00
|
|
|
if (port->inbuf)
|
|
|
|
return port->inbuf;
|
|
|
|
|
|
|
|
buf = virtqueue_get_buf(port->in_vq, &len);
|
2010-01-18 13:45:03 +00:00
|
|
|
if (buf) {
|
2021-05-25 12:56:22 +00:00
|
|
|
buf->len = min_t(size_t, len, buf->size);
|
2010-01-18 13:45:03 +00:00
|
|
|
buf->offset = 0;
|
2011-09-14 07:36:46 +00:00
|
|
|
port->stats.bytes_received += len;
|
2010-01-18 13:45:03 +00:00
|
|
|
}
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
2010-01-18 13:45:02 +00:00
|
|
|
/*
 * Create a scatter-gather list representing our input buffer and put
 * it in the queue.
 *
 * Callers should take appropriate locks.
 *
 * Returns the number of descriptors still free in the vq on success,
 * or the negative error from virtqueue_add_inbuf() on failure.
 */
static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
{
	struct scatterlist sg[1];
	int ret;

	sg_init_one(sg, buf->buf, buf->size);

	ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
	virtqueue_kick(vq);
	if (!ret)
		ret = vq->num_free;
	return ret;
}
|
|
|
|
|
2009-12-21 16:45:30 +00:00
|
|
|
/*
 * Discard any unread data this port has, recycling each buffer back
 * onto the in_vq (freeing it if that fails).  Callers should take
 * appropriate locks.
 */
static void discard_port_data(struct port *port)
{
	struct port_buffer *buf;
	unsigned int err;

	if (!port->portdev) {
		/* Device has been unplugged.  vqs are already gone. */
		return;
	}
	buf = get_inbuf(port);

	err = 0;
	while (buf) {
		/* Only the unread tail counts as discarded */
		port->stats.bytes_discarded += buf->len - buf->offset;
		if (add_inbuf(port->in_vq, buf) < 0) {
			err++;
			free_buf(buf, false);
		}
		port->inbuf = NULL;
		buf = get_inbuf(port);
	}
	if (err)
		dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
			 err);
}
|
|
|
|
|
2010-01-18 13:45:12 +00:00
|
|
|
static bool port_has_data(struct port *port)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
bool ret;
|
|
|
|
|
2011-09-14 07:36:43 +00:00
|
|
|
ret = false;
|
2010-01-18 13:45:12 +00:00
|
|
|
spin_lock_irqsave(&port->inbuf_lock, flags);
|
2010-02-12 05:02:18 +00:00
|
|
|
port->inbuf = get_inbuf(port);
|
2011-09-14 07:36:43 +00:00
|
|
|
if (port->inbuf)
|
2010-02-12 05:02:18 +00:00
|
|
|
ret = true;
|
2011-09-14 07:36:43 +00:00
|
|
|
|
2010-01-18 13:45:12 +00:00
|
|
|
spin_unlock_irqrestore(&port->inbuf_lock, flags);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-05-20 04:15:46 +00:00
|
|
|
/*
 * Send a control message (event, value) for port 'port_id' to the host
 * over the control-out virtqueue, busy-waiting until the host consumes
 * it.  A no-op for non-multiport devices.  Always returns 0.
 */
static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
				  unsigned int event, unsigned int value)
{
	struct scatterlist sg[1];
	struct virtqueue *vq;
	unsigned int len;

	if (!use_multiport(portdev))
		return 0;

	vq = portdev->c_ovq;

	spin_lock(&portdev->c_ovq_lock);

	/* cpkt is the per-device control packet, protected by c_ovq_lock */
	portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
	portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
	portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);

	sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));

	if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
		virtqueue_kick(vq);
		/* Spin until the host has used the buffer (or the vq broke) */
		while (!virtqueue_get_buf(vq, &len)
			&& !virtqueue_is_broken(vq))
			cpu_relax();
	}

	spin_unlock(&portdev->c_ovq_lock);
	return 0;
}
|
|
|
|
|
2010-05-20 04:15:46 +00:00
|
|
|
static ssize_t send_control_msg(struct port *port, unsigned int event,
|
|
|
|
unsigned int value)
|
|
|
|
{
|
2010-09-02 12:41:42 +00:00
|
|
|
/* Did the port get unplugged before userspace closed it? */
|
|
|
|
if (port->portdev)
|
|
|
|
return __send_control_msg(port->portdev, port->id, event, value);
|
|
|
|
return 0;
|
2010-05-20 04:15:46 +00:00
|
|
|
}
|
|
|
|
|
2012-08-09 12:30:39 +00:00
|
|
|
|
2010-05-20 04:15:50 +00:00
|
|
|
/*
 * Free every output buffer the host has consumed, clearing
 * port->outvq_full as descriptors become available again.
 * Callers must take the port->outvq_lock.
 */
static void reclaim_consumed_buffers(struct port *port)
{
	struct port_buffer *buf;
	unsigned int len;

	if (!port->portdev) {
		/* Device has been unplugged.  vqs are already gone. */
		return;
	}
	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
		free_buf(buf, false);
		port->outvq_full = false;
	}
}
|
|
|
|
|
2012-08-09 12:30:39 +00:00
|
|
|
/*
 * Queue @sg (@nents entries, @in_count bytes total) on the port's
 * output virtqueue and kick the Host.
 *
 * @data is the opaque cookie handed back by virtqueue_get_buf() when
 * the Host is done with it; ownership passes to the vq on success.
 * When @nonblock is false, busy-wait (under outvq_lock, so no
 * sleeping) until the Host has consumed the buffer.
 *
 * Returns the number of bytes handed to the Host (0 on failure to
 * queue).
 */
static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
			      int nents, size_t in_count,
			      void *data, bool nonblock)
{
	struct virtqueue *out_vq;
	int err;
	unsigned long flags;
	unsigned int len;

	out_vq = port->out_vq;

	spin_lock_irqsave(&port->outvq_lock, flags);

	/* First recycle anything the Host already consumed. */
	reclaim_consumed_buffers(port);

	err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);

	/* Tell Host to go! */
	virtqueue_kick(out_vq);

	if (err) {
		/* Nothing was queued; report 0 bytes written. */
		in_count = 0;
		goto done;
	}

	if (out_vq->num_free == 0)
		port->outvq_full = true;

	if (nonblock)
		goto done;

	/*
	 * Wait till the host acknowledges it pushed out the data we
	 * sent. This is done for data from the hvc_console; the tty
	 * operations are performed with spinlocks held so we can't
	 * sleep here. An alternative would be to copy the data to a
	 * buffer and relax the spinning requirement. The downside is
	 * we need to kmalloc a GFP_ATOMIC buffer each time the
	 * console driver writes something out.
	 */
	while (!virtqueue_get_buf(out_vq, &len)
		&& !virtqueue_is_broken(out_vq))
		cpu_relax();
done:
	spin_unlock_irqrestore(&port->outvq_lock, flags);

	port->stats.bytes_sent += in_count;
	/*
	 * We're expected to return the amount of data we wrote -- all
	 * of it
	 */
	return in_count;
}
|
|
|
|
|
2010-01-18 13:45:12 +00:00
|
|
|
/*
|
|
|
|
* Give out the data that's requested from the buffer that we have
|
|
|
|
* queued up.
|
|
|
|
*/
|
2023-12-06 07:36:57 +00:00
|
|
|
/*
 * Copy up to @out_count bytes from the port's current input buffer
 * into @out_buf.  When @to_user is true, @out_buf is a userspace
 * pointer and copy_to_user() is used; otherwise it is a kernel
 * pointer (the hvc console path) and memcpy() is used.
 *
 * Returns the number of bytes copied, 0 if there is nothing to read,
 * or -EFAULT on a failed userspace copy.
 */
static ssize_t fill_readbuf(struct port *port, u8 __user *out_buf,
			    size_t out_count, bool to_user)
{
	struct port_buffer *buf;
	unsigned long flags;

	if (!out_count || !port_has_data(port))
		return 0;

	buf = port->inbuf;
	/* Never hand out more than the buffer still holds. */
	out_count = min(out_count, buf->len - buf->offset);

	if (to_user) {
		ssize_t ret;

		ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
		if (ret)
			return -EFAULT;
	} else {
		memcpy((__force u8 *)out_buf, buf->buf + buf->offset,
		       out_count);
	}

	buf->offset += out_count;

	if (buf->offset == buf->len) {
		/*
		 * We're done using all the data in this buffer.
		 * Re-queue so that the Host can send us more data.
		 */
		spin_lock_irqsave(&port->inbuf_lock, flags);
		port->inbuf = NULL;

		if (add_inbuf(port->in_vq, buf) < 0)
			dev_warn(port->dev, "failed add_buf\n");

		spin_unlock_irqrestore(&port->inbuf_lock, flags);
	}
	/* Return the number of bytes actually copied */
	return out_count;
}
|
|
|
|
|
2009-12-21 16:19:30 +00:00
|
|
|
/* The condition that must be true for polling to end */
|
2010-05-20 04:15:49 +00:00
|
|
|
static bool will_read_block(struct port *port)
|
2009-12-21 16:19:30 +00:00
|
|
|
{
|
2010-09-02 12:41:43 +00:00
|
|
|
if (!port->guest_connected) {
|
|
|
|
/* Port got hot-unplugged. Let's exit. */
|
|
|
|
return false;
|
|
|
|
}
|
2010-05-20 04:15:49 +00:00
|
|
|
return !port_has_data(port) && port->host_connected;
|
2009-12-21 16:19:30 +00:00
|
|
|
}
|
|
|
|
|
2010-05-20 04:15:50 +00:00
|
|
|
static bool will_write_block(struct port *port)
|
|
|
|
{
|
|
|
|
bool ret;
|
|
|
|
|
2010-05-27 07:54:40 +00:00
|
|
|
if (!port->guest_connected) {
|
|
|
|
/* Port got hot-unplugged. Let's exit. */
|
|
|
|
return false;
|
|
|
|
}
|
2010-05-20 04:15:50 +00:00
|
|
|
if (!port->host_connected)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
spin_lock_irq(&port->outvq_lock);
|
|
|
|
/*
|
|
|
|
* Check if the Host has consumed any buffers since we last
|
|
|
|
* sent data (this is only applicable for nonblocking ports).
|
|
|
|
*/
|
|
|
|
reclaim_consumed_buffers(port);
|
|
|
|
ret = port->outvq_full;
|
|
|
|
spin_unlock_irq(&port->outvq_lock);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-12-21 16:19:30 +00:00
|
|
|
/*
 * read(2) handler for /dev/vportNpM.
 *
 * Returns 0 (EOF) when the host side is not connected, -EAGAIN for a
 * nonblocking read with no data, -ENODEV if the port is unplugged,
 * otherwise the byte count from fill_readbuf().
 */
static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
			      size_t count, loff_t *offp)
{
	struct port *port;
	ssize_t ret;

	port = filp->private_data;

	/* Port is hot-unplugged. */
	if (!port->guest_connected)
		return -ENODEV;

	if (!port_has_data(port)) {
		/*
		 * If nothing's connected on the host just return 0 in
		 * case of list_empty; this tells the userspace app
		 * that there's no connection
		 */
		if (!port->host_connected)
			return 0;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Sleep (freezer-friendly) until data arrives or unplug. */
		ret = wait_event_freezable(port->waitqueue,
					   !will_read_block(port));
		if (ret < 0)
			return ret;
	}
	/* Port got hot-unplugged while we were waiting above. */
	if (!port->guest_connected)
		return -ENODEV;
	/*
	 * We could've received a disconnection message while we were
	 * waiting for more data.
	 *
	 * This check is not clubbed in the if() statement above as we
	 * might receive some data as well as the host could get
	 * disconnected after we got woken up from our wait. So we
	 * really want to give off whatever data we have and only then
	 * check for host_connected.
	 */
	if (!port_has_data(port) && !port->host_connected)
		return 0;

	return fill_readbuf(port, ubuf, count, true);
}
|
|
|
|
|
2012-08-09 12:31:00 +00:00
|
|
|
/*
 * Wait until @port can accept a write (or fail immediately when
 * @nonblock).  Returns 0 when writable, -EAGAIN for a nonblocking
 * caller that would have to wait, -ENODEV if the port was unplugged,
 * or the error from the interrupted wait.
 */
static int wait_port_writable(struct port *port, bool nonblock)
{
	if (will_write_block(port)) {
		int err;

		if (nonblock)
			return -EAGAIN;

		err = wait_event_freezable(port->waitqueue,
					   !will_write_block(port));
		if (err < 0)
			return err;
	}

	/* Port got hot-unplugged. */
	return port->guest_connected ? 0 : -ENODEV;
}
|
|
|
|
|
2009-12-21 16:19:30 +00:00
|
|
|
/*
 * write(2) handler for /dev/vportNpM: copy up to 32KiB from userspace
 * into a freshly allocated port_buffer and queue it on the out vq.
 *
 * On a successful send the buffer's ownership moves to the virtqueue
 * (it is freed later by reclaim_consumed_buffers()); on failure it is
 * freed here.
 */
static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct port *port;
	struct port_buffer *buf;
	ssize_t ret;
	bool nonblock;
	struct scatterlist sg[1];

	/* Userspace could be out to fool us */
	if (!count)
		return 0;

	port = filp->private_data;

	nonblock = filp->f_flags & O_NONBLOCK;

	ret = wait_port_writable(port, nonblock);
	if (ret < 0)
		return ret;

	/* Cap a single write at 32KiB. */
	count = min((size_t)(32 * 1024), count);

	buf = alloc_buf(port->portdev->vdev, count, 0);
	if (!buf)
		return -ENOMEM;

	ret = copy_from_user(buf->buf, ubuf, count);
	if (ret) {
		ret = -EFAULT;
		goto free_buf;
	}

	/*
	 * We now ask send_buf() to not spin for generic ports -- we
	 * can re-use the same code path that non-blocking file
	 * descriptors take for blocking file descriptors since the
	 * wait is already done and we're certain the write will go
	 * through to the host.
	 */
	nonblock = true;
	sg_init_one(sg, buf->buf, count);
	ret = __send_to_port(port, sg, 1, count, buf, nonblock);

	/* Success: the vq now owns @buf; don't free it here. */
	if (nonblock && ret > 0)
		goto out;

free_buf:
	free_buf(buf, true);
out:
	return ret;
}
|
|
|
|
|
2012-08-09 12:30:39 +00:00
|
|
|
/* Scatterlist being filled by pipe_to_sg() during a splice write. */
struct sg_list {
	unsigned int n;		/* entries used so far */
	unsigned int size;	/* capacity of the sg array */
	size_t len;		/* total bytes described by the entries */
	struct scatterlist *sg;	/* the scatterlist being populated */
};
|
|
|
|
|
|
|
|
/*
 * splice actor: turn one pipe buffer into one scatterlist entry.
 *
 * Tries to steal the pipe page for true zero-copy; if stealing fails,
 * falls back to allocating a fresh page and copying into it.  Returns
 * the number of bytes added, 0 when the sg array is already full, or
 * -ENOMEM if the fallback page allocation fails.
 */
static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		      struct splice_desc *sd)
{
	struct sg_list *sgl = sd->u.data;
	unsigned int offset, len;

	if (sgl->n == sgl->size)
		return 0;

	/* Try lock this page */
	if (pipe_buf_try_steal(pipe, buf)) {
		/* Get reference and unlock page for moving */
		get_page(buf->page);
		unlock_page(buf->page);

		len = min(buf->len, sd->len);
		sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
	} else {
		/* Failback to copying a page */
		struct page *page = alloc_page(GFP_KERNEL);
		char *src;

		if (!page)
			return -ENOMEM;

		offset = sd->pos & ~PAGE_MASK;

		/* Clamp so the copy never crosses the page boundary. */
		len = sd->len;
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		src = kmap_atomic(buf->page);
		memcpy(page_address(page) + offset, src + buf->offset, len);
		kunmap_atomic(src);

		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
	}
	sgl->n++;
	sgl->len += len;

	return len;
}
|
|
|
|
|
|
|
|
/* Faster zero-copy write by splicing */
static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
				      struct file *filp, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	struct port *port = filp->private_data;
	struct sg_list sgl;
	ssize_t ret;
	struct port_buffer *buf;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.data = &sgl,
	};
	unsigned int occupancy;

	/*
	 * Rproc_serial does not yet support splice. To support splice
	 * pipe_to_sg() must allocate dma-buffers and copy content from
	 * regular pages to dma pages. And alloc_buf and free_buf must
	 * support allocating and freeing such a list of dma-buffers.
	 */
	if (is_rproc_serial(port->out_vq->vdev))
		return -EINVAL;

	pipe_lock(pipe);
	ret = 0;
	/* Nothing buffered in the pipe: report 0 bytes written. */
	if (pipe_empty(pipe->head, pipe->tail))
		goto error_out;

	ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
	if (ret < 0)
		goto error_out;

	/* Size the sg array to the number of occupied pipe slots. */
	occupancy = pipe_occupancy(pipe->head, pipe->tail);
	buf = alloc_buf(port->portdev->vdev, 0, occupancy);

	if (!buf) {
		ret = -ENOMEM;
		goto error_out;
	}

	sgl.n = 0;
	sgl.len = 0;
	sgl.size = occupancy;
	sgl.sg = buf->sg;
	sg_init_table(sgl.sg, sgl.size);
	ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
	pipe_unlock(pipe);
	if (likely(ret > 0))
		ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);

	/* On success the vq owns @buf; free it only on failure. */
	if (unlikely(ret <= 0))
		free_buf(buf, true);
	return ret;

error_out:
	pipe_unlock(pipe);
	return ret;
}
|
|
|
|
|
2017-07-03 10:39:46 +00:00
|
|
|
/* poll(2) handler for /dev/vportNpM. */
static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
{
	struct port *port = filp->private_data;
	__poll_t events = 0;

	poll_wait(filp, &port->waitqueue, wait);

	/* Port got unplugged */
	if (!port->guest_connected)
		return EPOLLHUP;

	if (!will_read_block(port))
		events |= EPOLLIN | EPOLLRDNORM;
	if (!will_write_block(port))
		events |= EPOLLOUT;
	if (!port->host_connected)
		events |= EPOLLHUP;

	return events;
}
|
|
|
|
|
2010-09-02 13:08:29 +00:00
|
|
|
static void remove_port(struct kref *kref);
|
|
|
|
|
2009-12-21 16:19:30 +00:00
|
|
|
/*
 * release (close(2)) handler: tell the Host the port is closed, drop
 * queued data, reclaim sent buffers, and release the kref taken at
 * open time.
 */
static int port_fops_release(struct inode *inode, struct file *filp)
{
	struct port *port;

	port = filp->private_data;

	/* Notify host of port being closed */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);

	spin_lock_irq(&port->inbuf_lock);
	port->guest_connected = false;

	/* Throw away any input nobody will read now. */
	discard_port_data(port);

	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);

	reclaim_dma_bufs();
	/*
	 * Locks aren't necessary here as a port can't be opened after
	 * unplug, and if a port isn't unplugged, a kref would already
	 * exist for the port. Plus, taking ports_lock here would
	 * create a dependency on other locks taken by functions
	 * inside remove_port if we're the last holder of the port,
	 * creating many problems.
	 */
	kref_put(&port->kref, remove_port);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * open(2) handler for /dev/vportNpM.
 *
 * Looks up the port by its char-device number (taking a kref that is
 * dropped either on the error paths below or in port_fops_release()),
 * enforces single-opener semantics, and tells the Host the port is
 * now open.
 */
static int port_fops_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct port *port;
	int ret;

	/* We get the port with a kref here */
	port = find_port_by_devt(cdev->dev);
	if (!port) {
		/* Port was unplugged before we could proceed */
		return -ENXIO;
	}
	filp->private_data = port;

	/*
	 * Don't allow opening of console port devices -- that's done
	 * via /dev/hvc
	 */
	if (is_console_port(port)) {
		ret = -ENXIO;
		goto out;
	}

	/* Allow only one process to open a particular port at a time */
	spin_lock_irq(&port->inbuf_lock);
	if (port->guest_connected) {
		spin_unlock_irq(&port->inbuf_lock);
		ret = -EBUSY;
		goto out;
	}

	port->guest_connected = true;
	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	/*
	 * There might be a chance that we missed reclaiming a few
	 * buffers in the window of the port getting previously closed
	 * and opening now.
	 */
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);

	nonseekable_open(inode, filp);

	/* Notify host of port being opened */
	send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
out:
	/* Drop the reference taken by find_port_by_devt(). */
	kref_put(&port->kref, remove_port);
	return ret;
}
|
|
|
|
|
2010-09-02 13:17:52 +00:00
|
|
|
static int port_fops_fasync(int fd, struct file *filp, int mode)
|
|
|
|
{
|
|
|
|
struct port *port;
|
|
|
|
|
|
|
|
port = filp->private_data;
|
|
|
|
return fasync_helper(fd, filp, mode, &port->async_queue);
|
|
|
|
}
|
|
|
|
|
2009-12-21 16:19:30 +00:00
|
|
|
/*
|
|
|
|
* The file operations that we support: programs in the guest can open
|
|
|
|
* a console device, read from it, write to it, poll for data and
|
|
|
|
* close it. The devices are at
|
|
|
|
* /dev/vport<device number>p<port number>
|
|
|
|
*/
|
|
|
|
static const struct file_operations port_fops = {
	.owner = THIS_MODULE,
	.open = port_fops_open,
	.read = port_fops_read,
	.write = port_fops_write,
	/* Zero-copy path used by splice(2)/sendfile(2) into the port. */
	.splice_write = port_fops_splice_write,
	.poll = port_fops_poll,
	.release = port_fops_release,
	.fasync = port_fops_fasync,
};
|
|
|
|
|
2010-01-18 13:44:55 +00:00
|
|
|
/*
|
|
|
|
* The put_chars() callback is pretty straightforward.
|
2007-10-22 01:03:39 +00:00
|
|
|
*
|
2010-01-18 13:44:55 +00:00
|
|
|
* We turn the characters into a scatter-gather list, add it to the
|
|
|
|
* output queue and then kick the Host. Then we sit here waiting for
|
|
|
|
* it to finish: inefficient in theory, but in practice
|
2017-08-16 17:31:57 +00:00
|
|
|
* implementations will do it immediately.
|
2010-01-18 13:44:55 +00:00
|
|
|
*/
|
2023-12-06 07:36:57 +00:00
|
|
|
static ssize_t put_chars(u32 vtermno, const u8 *buf, size_t count)
{
	struct port *port;
	struct scatterlist sg[1];
	void *copy;
	int sent;

	/* No port for this console any more: behave like a broken pipe. */
	port = find_port_by_vtermno(vtermno);
	if (!port)
		return -EPIPE;

	/*
	 * Duplicate the caller's bytes (GFP_ATOMIC: we may be called
	 * with spinlocks held) so the vq can own the buffer until the
	 * Host is done with it.
	 */
	copy = kmemdup(buf, count, GFP_ATOMIC);
	if (!copy)
		return -ENOMEM;

	sg_init_one(sg, copy, count);
	sent = __send_to_port(port, sg, 1, count, copy, false);
	kfree(copy);
	return sent;
}
|
|
|
|
|
2010-01-18 13:44:55 +00:00
|
|
|
/*
|
|
|
|
* get_chars() is the callback from the hvc_console infrastructure
|
|
|
|
* when an interrupt is received.
|
2007-10-22 01:03:39 +00:00
|
|
|
*
|
2010-01-18 13:45:12 +00:00
|
|
|
* We call out to fill_readbuf that gets us the required data from the
|
|
|
|
* buffers that are queued up.
|
2010-01-18 13:44:55 +00:00
|
|
|
*/
|
2023-12-06 07:36:57 +00:00
|
|
|
static ssize_t get_chars(u32 vtermno, u8 *buf, size_t count)
{
	struct port *port = find_port_by_vtermno(vtermno);

	/* The console's port may already be gone. */
	if (!port)
		return -EPIPE;

	/* If we don't have an input queue yet, we can't get input. */
	BUG_ON(!port->in_vq);

	/* buf is a kernel pointer here, hence to_user == false. */
	return fill_readbuf(port, (__force u8 __user *)buf, count, false);
}
|
|
|
|
|
2010-01-18 13:45:08 +00:00
|
|
|
static void resize_console(struct port *port)
|
2008-11-25 12:36:26 +00:00
|
|
|
{
|
2010-01-18 13:45:08 +00:00
|
|
|
struct virtio_device *vdev;
|
2008-11-25 12:36:26 +00:00
|
|
|
|
2010-03-19 12:06:44 +00:00
|
|
|
/* The port could have been hot-unplugged */
|
2010-05-05 20:35:08 +00:00
|
|
|
if (!port || !is_console_port(port))
|
2010-03-19 12:06:44 +00:00
|
|
|
return;
|
|
|
|
|
2010-01-18 13:45:08 +00:00
|
|
|
vdev = port->portdev->vdev;
|
2012-12-14 04:10:51 +00:00
|
|
|
|
|
|
|
/* Don't test F_SIZE at all if we're rproc: not a valid feature! */
|
|
|
|
if (!is_rproc_serial(vdev) &&
|
|
|
|
virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
|
2010-05-05 20:35:08 +00:00
|
|
|
hvc_resize(port->cons.hvc, port->cons.ws);
|
2008-11-25 12:36:26 +00:00
|
|
|
}
|
|
|
|
|
2010-01-18 13:45:05 +00:00
|
|
|
/* We set the configuration at this point, since we now have a tty */
|
2008-06-20 13:24:15 +00:00
|
|
|
static int notifier_add_vio(struct hvc_struct *hp, int data)
|
|
|
|
{
|
2010-01-18 13:45:05 +00:00
|
|
|
struct port *port;
|
|
|
|
|
|
|
|
port = find_port_by_vtermno(hp->vtermno);
|
|
|
|
if (!port)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2008-06-20 13:24:15 +00:00
|
|
|
hp->irq_requested = 1;
|
2010-01-18 13:45:08 +00:00
|
|
|
resize_console(port);
|
2008-11-25 12:36:26 +00:00
|
|
|
|
2008-06-20 13:24:15 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* hvc notifier teardown: mirror of notifier_add_vio(). */
static void notifier_del_vio(struct hvc_struct *hp, int data)
{
	hp->irq_requested = 0;
}
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
/* The operations for console ports. */
static const struct hv_ops hv_ops = {
	.get_chars = get_chars,
	.put_chars = put_chars,
	.notifier_add = notifier_add_vio,
	.notifier_del = notifier_del_vio,
	/* Hangup uses the same teardown as notifier removal. */
	.notifier_hangup = notifier_del_vio,
};
|
|
|
|
|
2013-04-08 06:43:59 +00:00
|
|
|
/*
 * Attach an hvc console to @port: allocate a vterm number, create the
 * hvc device, link the console into the driver-wide list, and notify
 * the Host that the port is open.  Returns 0 or a negative errno.
 */
static int init_port_console(struct port *port)
{
	int ret;

	/*
	 * The Host's telling us this port is a console port. Hook it
	 * up with an hvc console.
	 *
	 * To set up and manage our virtual console, we call
	 * hvc_alloc().
	 *
	 * The first argument of hvc_alloc() is the virtual console
	 * number. The second argument is the parameter for the
	 * notification mechanism (like irq number). We currently
	 * leave this as zero, virtqueues have implicit notifications.
	 *
	 * The third argument is a "struct hv_ops" containing the
	 * put_chars() get_chars(), notifier_add() and notifier_del()
	 * pointers. The final argument is the output buffer size: we
	 * can do any size, so we put PAGE_SIZE here.
	 */
	ret = ida_alloc_min(&vtermno_ida, 1, GFP_KERNEL);
	if (ret < 0)
		return ret;

	port->cons.vtermno = ret;
	port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
	if (IS_ERR(port->cons.hvc)) {
		ret = PTR_ERR(port->cons.hvc);
		dev_err(port->dev,
			"error %d allocating hvc for port\n", ret);
		port->cons.hvc = NULL;
		/* Give back the vterm number taken above. */
		ida_free(&vtermno_ida, port->cons.vtermno);
		return ret;
	}
	spin_lock_irq(&pdrvdata_lock);
	list_add_tail(&port->cons.list, &pdrvdata.consoles);
	spin_unlock_irq(&pdrvdata_lock);
	port->guest_connected = true;

	/* Notify host of port being opened */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
}
|
|
|
|
|
2009-12-21 16:27:40 +00:00
|
|
|
static ssize_t show_port_name(struct device *dev,
|
|
|
|
struct device_attribute *attr, char *buffer)
|
|
|
|
{
|
|
|
|
struct port *port;
|
|
|
|
|
|
|
|
port = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
return sprintf(buffer, "%s\n", port->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
|
|
|
|
|
|
|
|
/* Attributes exposed under the port's sysfs device directory. */
static struct attribute *port_sysfs_entries[] = {
	&dev_attr_name.attr,
	NULL
};

static const struct attribute_group port_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = port_sysfs_entries,
};
|
|
|
|
|
2018-12-01 02:41:28 +00:00
|
|
|
/* debugfs dump of per-port state and traffic counters. */
static int port_debugfs_show(struct seq_file *s, void *data)
{
	struct port *port = s->private;

	/* name may still be unset if the Host never sent one. */
	seq_printf(s, "name: %s\n", port->name ? port->name : "");
	seq_printf(s, "guest_connected: %d\n", port->guest_connected);
	seq_printf(s, "host_connected: %d\n", port->host_connected);
	seq_printf(s, "outvq_full: %d\n", port->outvq_full);
	seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
	seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
	seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
	seq_printf(s, "is_console: %s\n",
		   is_console_port(port) ? "yes" : "no");
	seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(port_debugfs);
|
2009-12-21 17:06:21 +00:00
|
|
|
|
2010-05-05 20:35:08 +00:00
|
|
|
/* Record a new window size for a console port (no-op otherwise). */
static void set_console_size(struct port *port, u16 rows, u16 cols)
{
	if (!port)
		return;
	if (!is_console_port(port))
		return;

	port->cons.ws.ws_col = cols;
	port->cons.ws.ws_row = rows;
}
|
|
|
|
|
virtio_console: allocate inbufs in add_port() only if it is needed
When we hot unplug a virtserialport and then try to hot plug again,
it fails:
(qemu) chardev-add socket,id=serial0,path=/tmp/serial0,server,nowait
(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
chardev=serial0,id=serial0,name=serial0
(qemu) device_del serial0
(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
chardev=serial0,id=serial0,name=serial0
kernel error:
virtio-ports vport2p2: Error allocating inbufs
qemu error:
virtio-serial-bus: Guest failure in adding port 2 for device \
virtio-serial0.0
This happens because buffers for the in_vq are allocated when the port is
added but are not released when the port is unplugged.
They are only released when virtconsole is removed (see a7a69ec0d8e4)
To avoid the problem and to be symmetric, we could allocate all the buffers
in init_vqs() as they are released in remove_vqs(), but it sounds like
a waste of memory.
Rather than that, this patch changes add_port() logic to ignore ENOSPC
error in fill_queue(), which means queue has already been filled.
Fixes: a7a69ec0d8e4 ("virtio_console: free buffers after reset")
Cc: mst@redhat.com
Cc: stable@vger.kernel.org
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2019-11-14 12:25:48 +00:00
|
|
|
/*
 * Keep adding freshly allocated PAGE_SIZE receive buffers to @vq until
 * the virtqueue refuses to take more (add_inbuf() returns 0 when the
 * ring is full).
 *
 * Returns the number of buffers queued on success, -ENOMEM if an
 * allocation fails, or the negative error from add_inbuf() (e.g.
 * -ENOSPC when the queue already holds buffers).  @lock serialises
 * against the queue's interrupt handler.
 */
static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
	int nr_added_bufs = 0;

	for (;;) {
		struct port_buffer *buf;
		int ret;

		buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
		if (!buf)
			return -ENOMEM;

		spin_lock_irq(lock);
		ret = add_inbuf(vq, buf);
		if (ret < 0) {
			spin_unlock_irq(lock);
			/* The buffer was never queued; release it here. */
			free_buf(buf, true);
			return ret;
		}
		nr_added_bufs++;
		spin_unlock_irq(lock);

		/* ret == 0 means the ring has no room left: we're done. */
		if (ret == 0)
			break;
	}

	return nr_added_bufs;
}
|
|
|
|
|
2010-09-02 13:17:52 +00:00
|
|
|
static void send_sigio_to_port(struct port *port)
|
|
|
|
{
|
|
|
|
if (port->async_queue && port->guest_connected)
|
|
|
|
kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
|
|
|
|
}
|
|
|
|
|
2010-05-20 04:15:48 +00:00
|
|
|
/*
 * Instantiate guest-side state for port @id on @portdev: allocate and
 * initialise the struct port, register its char device and sysfs
 * device node, pre-fill its receive virtqueue, hook it up as a console
 * if appropriate, add it to the portdev's port list, tell the host the
 * port is ready, and create its debugfs file.
 *
 * Returns 0 on success or a negative errno; on failure the host is
 * informed via a PORT_READY(0) control message so management software
 * can react.
 */
static int add_port(struct ports_device *portdev, u32 id)
{
	char debugfs_name[16];
	struct port *port;
	dev_t devt;
	int err;

	port = kmalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		err = -ENOMEM;
		goto fail;
	}
	/* First reference; dropped by unplug_port() via kref_put(). */
	kref_init(&port->kref);

	port->portdev = portdev;
	port->id = id;

	port->name = NULL;
	port->inbuf = NULL;
	port->cons.hvc = NULL;
	port->async_queue = NULL;

	port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
	port->cons.vtermno = 0;

	port->host_connected = port->guest_connected = false;
	port->stats = (struct port_stats) { 0 };

	port->outvq_full = false;

	/* The vqs for this port were set up earlier by init_vqs(). */
	port->in_vq = portdev->in_vqs[port->id];
	port->out_vq = portdev->out_vqs[port->id];

	port->cdev = cdev_alloc();
	if (!port->cdev) {
		dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
		err = -ENOMEM;
		goto free_port;
	}
	port->cdev->ops = &port_fops;

	devt = MKDEV(portdev->chr_major, id);
	err = cdev_add(port->cdev, devt, 1);
	if (err < 0) {
		dev_err(&port->portdev->vdev->dev,
			"Error %d adding cdev for port %u\n", err, id);
		goto free_cdev;
	}
	port->dev = device_create(&port_class, &port->portdev->vdev->dev,
				  devt, port, "vport%up%u",
				  port->portdev->vdev->index, id);
	if (IS_ERR(port->dev)) {
		err = PTR_ERR(port->dev);
		dev_err(&port->portdev->vdev->dev,
			"Error %d creating device for port %u\n",
			err, id);
		goto free_cdev;
	}

	spin_lock_init(&port->inbuf_lock);
	spin_lock_init(&port->outvq_lock);
	init_waitqueue_head(&port->waitqueue);

	/* We can safely ignore ENOSPC because it means
	 * the queue already has buffers. Buffers are removed
	 * only by virtcons_remove(), not by unplug_port()
	 */
	err = fill_queue(port->in_vq, &port->inbuf_lock);
	if (err < 0 && err != -ENOSPC) {
		dev_err(port->dev, "Error allocating inbufs\n");
		goto free_device;
	}

	if (is_rproc_serial(port->portdev->vdev))
		/*
		 * For rproc_serial assume remote processor is connected.
		 * rproc_serial does not want the console port, only
		 * the generic port implementation.
		 */
		port->host_connected = true;
	else if (!use_multiport(port->portdev)) {
		/*
		 * If we're not using multiport support,
		 * this has to be a console port.
		 */
		err = init_port_console(port);
		if (err)
			goto free_inbufs;
	}

	spin_lock_irq(&portdev->ports_lock);
	list_add_tail(&port->list, &port->portdev->ports);
	spin_unlock_irq(&portdev->ports_lock);

	/*
	 * Tell the Host we're set so that it can send us various
	 * configuration parameters for this port (eg, port name,
	 * caching, whether this is a console port, etc.)
	 */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);

	/*
	 * Finally, create the debugfs file that we can use to
	 * inspect a port's state at any time
	 */
	snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
		 port->portdev->vdev->index, id);
	port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
						 pdrvdata.debugfs_dir,
						 port, &port_debugfs_fops);
	return 0;

	/*
	 * free_inbufs intentionally falls through: inbufs stay queued
	 * and are only reclaimed by virtcons_remove() (see comment
	 * above fill_queue() call).
	 */
free_inbufs:
free_device:
	device_destroy(&port_class, port->dev->devt);
free_cdev:
	cdev_del(port->cdev);
free_port:
	kfree(port);
fail:
	/* The host might want to notify management sw about port add failure */
	__send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
	return err;
}
|
|
|
|
|
2010-09-02 13:08:29 +00:00
|
|
|
/* No users remain, remove all port-specific data. */
|
|
|
|
static void remove_port(struct kref *kref)
|
|
|
|
{
|
|
|
|
struct port *port;
|
|
|
|
|
|
|
|
port = container_of(kref, struct port, kref);
|
|
|
|
|
|
|
|
kfree(port);
|
|
|
|
}
|
|
|
|
|
2011-12-22 11:28:27 +00:00
|
|
|
/*
 * Drop any buffers this port is still holding: discard unread incoming
 * data and reclaim outgoing buffers the host has already consumed.
 * Each virtqueue is drained under its own lock to serialise against
 * the corresponding interrupt path.
 */
static void remove_port_data(struct port *port)
{
	spin_lock_irq(&port->inbuf_lock);
	/* Remove unused data this port might have received. */
	discard_port_data(port);
	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);
}
|
|
|
|
|
2010-09-02 13:08:29 +00:00
|
|
|
/*
 * Port got unplugged.  Remove port from portdev's list and drop the
 * kref reference.  If no userspace has this port opened, it will
 * result in immediate removal the port.
 */
static void unplug_port(struct port *port)
{
	/* Unlink first so no new opens can find this port. */
	spin_lock_irq(&port->portdev->ports_lock);
	list_del(&port->list);
	spin_unlock_irq(&port->portdev->ports_lock);

	spin_lock_irq(&port->inbuf_lock);
	if (port->guest_connected) {
		/* Let the app know the port is going down. */
		send_sigio_to_port(port);

		/* Do this after sigio is actually sent */
		port->guest_connected = false;
		port->host_connected = false;

		/* Unblock any readers/writers sleeping on the port. */
		wake_up_interruptible(&port->waitqueue);
	}
	spin_unlock_irq(&port->inbuf_lock);

	if (is_console_port(port)) {
		/* Detach from the HVC layer and release the vterm number. */
		spin_lock_irq(&pdrvdata_lock);
		list_del(&port->cons.list);
		spin_unlock_irq(&pdrvdata_lock);
		hvc_remove(port->cons.hvc);
		ida_free(&vtermno_ida, port->cons.vtermno);
	}

	remove_port_data(port);

	/*
	 * We should just assume the device itself has gone off --
	 * else a close on an open port later will try to send out a
	 * control message.
	 */
	port->portdev = NULL;

	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
	device_destroy(&port_class, port->dev->devt);
	cdev_del(port->cdev);

	debugfs_remove(port->debugfs_file);
	kfree(port->name);

	/*
	 * Locks around here are not necessary - a port can't be
	 * opened after we removed the port struct from ports_list
	 * above.
	 */
	kref_put(&port->kref, remove_port);
}
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
/* Any private messages that the Host and Guest want to share */
/*
 * Dispatch one control-queue packet from the host.  @buf holds a
 * virtio_console_control header (at buf->offset) optionally followed
 * by event-specific payload (console size, port name).  Runs from the
 * control workqueue, so it may sleep (kmalloc GFP_KERNEL, add_port).
 */
static void handle_control_message(struct virtio_device *vdev,
				   struct ports_device *portdev,
				   struct port_buffer *buf)
{
	struct virtio_console_control *cpkt;
	struct port *port;
	size_t name_size;
	int err;

	cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);

	/* PORT_ADD is the only event allowed to name a not-yet-known id. */
	port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id));
	if (!port &&
	    cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) {
		/* No valid header at start of buffer.  Drop it. */
		dev_dbg(&portdev->vdev->dev,
			"Invalid index %u in control packet\n", cpkt->id);
		return;
	}

	switch (virtio16_to_cpu(vdev, cpkt->event)) {
	case VIRTIO_CONSOLE_PORT_ADD:
		if (port) {
			/*
			 * Can happen e.g. after hibernation resume; just
			 * re-ack readiness to the host.
			 */
			dev_dbg(&portdev->vdev->dev,
				"Port %u already added\n", port->id);
			send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
			break;
		}
		if (virtio32_to_cpu(vdev, cpkt->id) >=
			portdev->max_nr_ports) {
			dev_warn(&portdev->vdev->dev,
				"Request for adding port with "
				"out-of-bound id %u, max. supported id: %u\n",
				cpkt->id, portdev->max_nr_ports - 1);
			break;
		}
		add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
		break;
	case VIRTIO_CONSOLE_PORT_REMOVE:
		unplug_port(port);
		break;
	case VIRTIO_CONSOLE_CONSOLE_PORT:
		if (!cpkt->value)
			break;
		if (is_console_port(port))
			break;

		init_port_console(port);
		complete(&early_console_added);
		/*
		 * Could remove the port here in case init fails - but
		 * have to notify the host first.
		 */
		break;
	case VIRTIO_CONSOLE_RESIZE: {
		/* Payload layout after the control header: rows, cols. */
		struct {
			__u16 rows;
			__u16 cols;
		} size;

		if (!is_console_port(port))
			break;

		memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
		       sizeof(size));
		set_console_size(port, size.rows, size.cols);

		port->cons.hvc->irq_requested = 1;
		resize_console(port);
		break;
	}
	case VIRTIO_CONSOLE_PORT_OPEN:
		port->host_connected = virtio16_to_cpu(vdev, cpkt->value);
		wake_up_interruptible(&port->waitqueue);
		/*
		 * If the host port got closed and the host had any
		 * unconsumed buffers, we'll be able to reclaim them
		 * now.
		 */
		spin_lock_irq(&port->outvq_lock);
		reclaim_consumed_buffers(port);
		spin_unlock_irq(&port->outvq_lock);

		/*
		 * If the guest is connected, it'll be interested in
		 * knowing the host connection state changed.
		 */
		spin_lock_irq(&port->inbuf_lock);
		send_sigio_to_port(port);
		spin_unlock_irq(&port->inbuf_lock);
		break;
	case VIRTIO_CONSOLE_PORT_NAME:
		/*
		 * If we woke up after hibernation, we can get this
		 * again.  Skip it in that case.
		 */
		if (port->name)
			break;

		/*
		 * Skip the size of the header and the cpkt to get the size
		 * of the name that was sent
		 */
		name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;

		port->name = kmalloc(name_size, GFP_KERNEL);
		if (!port->name) {
			dev_err(port->dev,
				"Not enough space to store port name\n");
			break;
		}
		strscpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
			name_size);

		/*
		 * Since we only have one sysfs attribute, 'name',
		 * create it only if we have a name for the port.
		 */
		err = sysfs_create_group(&port->dev->kobj,
					 &port_attribute_group);
		if (err) {
			dev_err(port->dev,
				"Error %d creating sysfs device attributes\n",
				err);
		} else {
			/*
			 * Generate a udev event so that appropriate
			 * symlinks can be created based on udev
			 * rules.
			 */
			kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
		}
		break;
	}
	/* NOTE(review): unknown events are silently ignored (no default). */
}
|
|
|
|
|
|
|
|
/*
 * Workqueue handler for the control receive queue: pop each completed
 * buffer, clamp its length, hand it to handle_control_message(), then
 * requeue it for the host.  c_ivq_lock guards virtqueue access but is
 * dropped across message handling, which may sleep.
 */
static void control_work_handler(struct work_struct *work)
{
	struct ports_device *portdev;
	struct virtqueue *vq;
	struct port_buffer *buf;
	unsigned int len;

	portdev = container_of(work, struct ports_device, control_work);
	vq = portdev->c_ivq;

	spin_lock(&portdev->c_ivq_lock);
	while ((buf = virtqueue_get_buf(vq, &len))) {
		/* Drop the lock: handle_control_message() can sleep. */
		spin_unlock(&portdev->c_ivq_lock);

		/* Never trust the device-reported length beyond our buffer. */
		buf->len = min_t(size_t, len, buf->size);
		buf->offset = 0;

		handle_control_message(vq->vdev, portdev, buf);

		spin_lock(&portdev->c_ivq_lock);
		if (add_inbuf(portdev->c_ivq, buf) < 0) {
			dev_warn(&portdev->vdev->dev,
				 "Error adding buffer to queue\n");
			/* Can't requeue; free it (atomic context: false). */
			free_buf(buf, false);
		}
	}
	spin_unlock(&portdev->c_ivq_lock);
}
|
|
|
|
|
2018-04-20 17:24:23 +00:00
|
|
|
/*
 * Drain every completed buffer off @vq and free it.  @can_sleep tells
 * free_buf() whether it may block.
 */
static void flush_bufs(struct virtqueue *vq, bool can_sleep)
{
	unsigned int len;

	for (;;) {
		struct port_buffer *buf = virtqueue_get_buf(vq, &len);

		if (!buf)
			break;
		free_buf(buf, can_sleep);
	}
}
|
|
|
|
|
2011-01-31 07:36:36 +00:00
|
|
|
static void out_intr(struct virtqueue *vq)
|
|
|
|
{
|
|
|
|
struct port *port;
|
|
|
|
|
|
|
|
port = find_port_by_vq(vq->vdev->priv, vq);
|
2018-04-20 17:24:23 +00:00
|
|
|
if (!port) {
|
|
|
|
flush_bufs(vq, false);
|
2011-01-31 07:36:36 +00:00
|
|
|
return;
|
2018-04-20 17:24:23 +00:00
|
|
|
}
|
2011-01-31 07:36:36 +00:00
|
|
|
|
|
|
|
wake_up_interruptible(&port->waitqueue);
|
|
|
|
}
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
/*
 * Input-queue interrupt: the host delivered data for this port.  Grab
 * the newly completed buffer under inbuf_lock, discard it if the port
 * isn't supposed to accept data yet, notify waiters/fasync listeners,
 * and poke the HVC layer for console ports.
 */
static void in_intr(struct virtqueue *vq)
{
	struct port *port;
	unsigned long flags;

	port = find_port_by_vq(vq->vdev->priv, vq);
	if (!port) {
		/* Port is gone; free the buffers (non-sleeping: irq context). */
		flush_bufs(vq, false);
		return;
	}

	spin_lock_irqsave(&port->inbuf_lock, flags);
	port->inbuf = get_inbuf(port);

	/*
	 * Normally the port should not accept data when the port is
	 * closed. For generic serial ports, the host won't (shouldn't)
	 * send data till the guest is connected. But this condition
	 * can be reached when a console port is not yet connected (no
	 * tty is spawned) and the other side sends out data over the
	 * vring, or when a remote devices start sending data before
	 * the ports are opened.
	 *
	 * A generic serial port will discard data if not connected,
	 * while console ports and rproc-serial ports accepts data at
	 * any time. rproc-serial is initiated with guest_connected to
	 * false because port_fops_open expects this. Console ports are
	 * hooked up with an HVC console and is initialized with
	 * guest_connected to true.
	 */

	if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
		discard_port_data(port);

	/* Send a SIGIO indicating new data in case the process asked for it */
	send_sigio_to_port(port);

	spin_unlock_irqrestore(&port->inbuf_lock, flags);

	wake_up_interruptible(&port->waitqueue);

	/* Console ports: let HVC pull the new data and kick its thread. */
	if (is_console_port(port) && hvc_poll(port->cons.hvc))
		hvc_kick();
}
|
|
|
|
|
|
|
|
static void control_intr(struct virtqueue *vq)
|
|
|
|
{
|
|
|
|
struct ports_device *portdev;
|
|
|
|
|
|
|
|
portdev = vq->vdev->priv;
|
|
|
|
schedule_work(&portdev->control_work);
|
|
|
|
}
|
|
|
|
|
2009-12-21 16:52:08 +00:00
|
|
|
static void config_intr(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
struct ports_device *portdev;
|
|
|
|
|
|
|
|
portdev = vdev->priv;
|
2010-05-20 04:15:48 +00:00
|
|
|
|
2015-03-05 00:15:49 +00:00
|
|
|
if (!use_multiport(portdev))
|
|
|
|
schedule_work(&portdev->config_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Workqueue handler for legacy (non-multiport) config changes: read
 * the console geometry from virtio config space and resize port 0's
 * console accordingly.
 */
static void config_work_handler(struct work_struct *work)
{
	struct ports_device *portdev;

	portdev = container_of(work, struct ports_device, config_work);
	if (!use_multiport(portdev)) {
		struct virtio_device *vdev;
		struct port *port;
		u16 rows, cols;

		vdev = portdev->vdev;
		virtio_cread(vdev, struct virtio_console_config, cols, &cols);
		virtio_cread(vdev, struct virtio_console_config, rows, &rows);

		/* Without multiport there is exactly one port: id 0. */
		port = find_port_by_id(portdev, 0);
		set_console_size(port, rows, cols);

		/*
		 * We'll use this way of resizing only for legacy
		 * support. For newer userspace
		 * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages
		 * to indicate console size changes so that it can be
		 * done per-port.
		 */
		resize_console(port);
	}
}
|
|
|
|
|
2010-01-18 13:45:11 +00:00
|
|
|
static int init_vqs(struct ports_device *portdev)
|
|
|
|
{
|
2024-07-08 07:48:06 +00:00
|
|
|
struct virtqueue_info *vqs_info;
|
2010-01-18 13:45:11 +00:00
|
|
|
struct virtqueue **vqs;
|
2009-12-21 15:33:25 +00:00
|
|
|
u32 i, j, nr_ports, nr_queues;
|
2010-01-18 13:45:11 +00:00
|
|
|
int err;
|
|
|
|
|
2016-12-05 19:39:42 +00:00
|
|
|
nr_ports = portdev->max_nr_ports;
|
2009-12-21 15:33:25 +00:00
|
|
|
nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
|
2010-01-18 13:45:11 +00:00
|
|
|
|
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 20:55:00 +00:00
|
|
|
vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);
|
2024-07-08 07:48:06 +00:00
|
|
|
vqs_info = kcalloc(nr_queues, sizeof(*vqs_info), GFP_KERNEL);
|
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 20:55:00 +00:00
|
|
|
portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
|
|
|
|
GFP_KERNEL);
|
|
|
|
portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
|
|
|
|
GFP_KERNEL);
|
2024-07-08 07:48:06 +00:00
|
|
|
if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) {
|
2010-01-18 13:45:11 +00:00
|
|
|
err = -ENOMEM;
|
2010-11-06 09:06:50 +00:00
|
|
|
goto free;
|
2010-01-18 13:45:11 +00:00
|
|
|
}
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
/*
|
|
|
|
* For backward compat (newer host but older guest), the host
|
|
|
|
* spawns a console port first and also inits the vqs for port
|
|
|
|
* 0 before others.
|
|
|
|
*/
|
|
|
|
j = 0;
|
2024-07-08 07:48:06 +00:00
|
|
|
vqs_info[j].callback = in_intr;
|
|
|
|
vqs_info[j + 1].callback = out_intr;
|
|
|
|
vqs_info[j].name = "input";
|
|
|
|
vqs_info[j + 1].name = "output";
|
2009-12-21 15:33:25 +00:00
|
|
|
j += 2;
|
|
|
|
|
|
|
|
if (use_multiport(portdev)) {
|
2024-07-08 07:48:06 +00:00
|
|
|
vqs_info[j].callback = control_intr;
|
|
|
|
vqs_info[j].name = "control-i";
|
|
|
|
vqs_info[j + 1].name = "control-o";
|
2009-12-21 15:33:25 +00:00
|
|
|
|
|
|
|
for (i = 1; i < nr_ports; i++) {
|
|
|
|
j += 2;
|
2024-07-08 07:48:06 +00:00
|
|
|
vqs_info[j].callback = in_intr;
|
|
|
|
vqs_info[j + 1].callback = out_intr;
|
|
|
|
vqs_info[j].name = "input";
|
|
|
|
vqs_info[j + 1].name = "output";
|
2009-12-21 15:33:25 +00:00
|
|
|
}
|
|
|
|
}
|
2010-01-18 13:45:11 +00:00
|
|
|
/* Find the queues. */
|
2024-07-08 07:48:14 +00:00
|
|
|
err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, vqs_info, NULL);
|
2010-01-18 13:45:11 +00:00
|
|
|
if (err)
|
2010-11-06 09:06:50 +00:00
|
|
|
goto free;
|
2010-01-18 13:45:11 +00:00
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
j = 0;
|
2010-01-18 13:45:11 +00:00
|
|
|
portdev->in_vqs[0] = vqs[0];
|
|
|
|
portdev->out_vqs[0] = vqs[1];
|
2009-12-21 15:33:25 +00:00
|
|
|
j += 2;
|
|
|
|
if (use_multiport(portdev)) {
|
|
|
|
portdev->c_ivq = vqs[j];
|
|
|
|
portdev->c_ovq = vqs[j + 1];
|
|
|
|
|
|
|
|
for (i = 1; i < nr_ports; i++) {
|
|
|
|
j += 2;
|
|
|
|
portdev->in_vqs[i] = vqs[j];
|
|
|
|
portdev->out_vqs[i] = vqs[j + 1];
|
|
|
|
}
|
|
|
|
}
|
2024-07-08 07:48:06 +00:00
|
|
|
kfree(vqs_info);
|
2010-01-18 13:45:11 +00:00
|
|
|
kfree(vqs);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2010-11-06 09:06:50 +00:00
|
|
|
free:
|
2010-01-18 13:45:11 +00:00
|
|
|
kfree(portdev->out_vqs);
|
|
|
|
kfree(portdev->in_vqs);
|
2024-07-08 07:48:06 +00:00
|
|
|
kfree(vqs_info);
|
2010-01-18 13:45:11 +00:00
|
|
|
kfree(vqs);
|
2010-11-06 09:06:50 +00:00
|
|
|
|
2010-01-18 13:45:11 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2009-12-21 16:06:04 +00:00
|
|
|
static const struct file_operations portdev_fops = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
};
|
|
|
|
|
2011-12-22 11:28:27 +00:00
|
|
|
static void remove_vqs(struct ports_device *portdev)
|
|
|
|
{
|
2018-04-20 17:24:23 +00:00
|
|
|
struct virtqueue *vq;
|
|
|
|
|
|
|
|
virtio_device_for_each_vq(portdev->vdev, vq) {
|
|
|
|
struct port_buffer *buf;
|
|
|
|
|
|
|
|
flush_bufs(vq, true);
|
|
|
|
while ((buf = virtqueue_detach_unused_buf(vq)))
|
|
|
|
free_buf(buf, true);
|
2023-06-09 13:18:16 +00:00
|
|
|
cond_resched();
|
2018-04-20 17:24:23 +00:00
|
|
|
}
|
2011-12-22 11:28:27 +00:00
|
|
|
portdev->vdev->config->del_vqs(portdev->vdev);
|
|
|
|
kfree(portdev->in_vqs);
|
|
|
|
kfree(portdev->out_vqs);
|
|
|
|
}
|
|
|
|
|
2018-04-20 17:51:18 +00:00
|
|
|
static void virtcons_remove(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
struct ports_device *portdev;
|
|
|
|
struct port *port, *port2;
|
|
|
|
|
|
|
|
portdev = vdev->priv;
|
|
|
|
|
|
|
|
spin_lock_irq(&pdrvdata_lock);
|
|
|
|
list_del(&portdev->list);
|
|
|
|
spin_unlock_irq(&pdrvdata_lock);
|
|
|
|
|
2021-10-05 07:04:10 +00:00
|
|
|
/* Device is going away, exit any polling for buffers */
|
|
|
|
virtio_break_device(vdev);
|
|
|
|
if (use_multiport(portdev))
|
|
|
|
flush_work(&portdev->control_work);
|
|
|
|
else
|
|
|
|
flush_work(&portdev->config_work);
|
|
|
|
|
2018-04-20 17:51:18 +00:00
|
|
|
/* Disable interrupts for vqs */
|
2021-10-13 10:55:44 +00:00
|
|
|
virtio_reset_device(vdev);
|
2018-04-20 17:51:18 +00:00
|
|
|
/* Finish up work that's lined up */
|
|
|
|
if (use_multiport(portdev))
|
|
|
|
cancel_work_sync(&portdev->control_work);
|
|
|
|
else
|
|
|
|
cancel_work_sync(&portdev->config_work);
|
|
|
|
|
|
|
|
list_for_each_entry_safe(port, port2, &portdev->ports, list)
|
|
|
|
unplug_port(port);
|
|
|
|
|
|
|
|
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When yanking out a device, we immediately lose the
|
|
|
|
* (device-side) queues. So there's no point in keeping the
|
|
|
|
* guest side around till we drop our final reference. This
|
|
|
|
* also means that any ports which are in an open state will
|
|
|
|
* have to just stop using the port, as the vqs are going
|
|
|
|
* away.
|
|
|
|
*/
|
|
|
|
remove_vqs(portdev);
|
|
|
|
kfree(portdev);
|
|
|
|
}
|
|
|
|
|
2010-01-18 13:45:07 +00:00
|
|
|
/*
|
|
|
|
* Once we're further in boot, we get probed like any other virtio
|
|
|
|
* device.
|
2009-12-21 15:33:25 +00:00
|
|
|
*
|
|
|
|
* If the host also supports multiple console ports, we check the
|
|
|
|
* config space to see how many ports the host has spawned. We
|
|
|
|
* initialize each port found.
|
2010-01-18 13:45:07 +00:00
|
|
|
*/
|
2012-11-19 18:22:51 +00:00
|
|
|
static int virtcons_probe(struct virtio_device *vdev)
|
2010-01-18 13:45:07 +00:00
|
|
|
{
|
|
|
|
struct ports_device *portdev;
|
|
|
|
int err;
|
2009-12-21 15:33:25 +00:00
|
|
|
bool multiport;
|
2011-09-22 18:14:23 +00:00
|
|
|
|
2015-02-11 04:31:14 +00:00
|
|
|
/* We only need a config space if features are offered */
|
|
|
|
if (!vdev->config->get &&
|
|
|
|
(virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)
|
|
|
|
|| virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) {
|
2015-01-12 14:23:37 +00:00
|
|
|
dev_err(&vdev->dev, "%s failure: config access disabled\n",
|
|
|
|
__func__);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2010-01-18 13:45:07 +00:00
|
|
|
portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
|
|
|
|
if (!portdev) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Attach this portdev to this virtio_device, and vice-versa. */
|
|
|
|
portdev->vdev = vdev;
|
|
|
|
vdev->priv = portdev;
|
|
|
|
|
2009-12-21 16:06:04 +00:00
|
|
|
portdev->chr_major = register_chrdev(0, "virtio-portsdev",
|
|
|
|
&portdev_fops);
|
|
|
|
if (portdev->chr_major < 0) {
|
|
|
|
dev_err(&vdev->dev,
|
|
|
|
"Error %d registering chrdev for device %u\n",
|
2013-02-12 05:54:59 +00:00
|
|
|
portdev->chr_major, vdev->index);
|
2009-12-21 16:06:04 +00:00
|
|
|
err = portdev->chr_major;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
multiport = false;
|
2016-12-05 19:39:42 +00:00
|
|
|
portdev->max_nr_ports = 1;
|
2012-12-14 04:10:51 +00:00
|
|
|
|
|
|
|
/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
|
|
|
|
if (!is_rproc_serial(vdev) &&
|
2013-10-14 07:41:51 +00:00
|
|
|
virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
|
|
|
|
struct virtio_console_config, max_nr_ports,
|
2016-12-05 19:39:42 +00:00
|
|
|
&portdev->max_nr_ports) == 0) {
|
2021-10-19 07:01:44 +00:00
|
|
|
if (portdev->max_nr_ports == 0 ||
|
|
|
|
portdev->max_nr_ports > VIRTCONS_MAX_PORTS) {
|
|
|
|
dev_err(&vdev->dev,
|
|
|
|
"Invalidate max_nr_ports %d",
|
|
|
|
portdev->max_nr_ports);
|
|
|
|
err = -EINVAL;
|
|
|
|
goto free;
|
|
|
|
}
|
2009-12-21 15:33:25 +00:00
|
|
|
multiport = true;
|
2012-12-14 04:10:51 +00:00
|
|
|
}
|
2009-12-21 15:33:25 +00:00
|
|
|
|
|
|
|
spin_lock_init(&portdev->ports_lock);
|
|
|
|
INIT_LIST_HEAD(&portdev->ports);
|
2018-04-20 18:00:13 +00:00
|
|
|
INIT_LIST_HEAD(&portdev->list);
|
2009-12-21 15:33:25 +00:00
|
|
|
|
2015-03-05 00:15:49 +00:00
|
|
|
INIT_WORK(&portdev->config_work, &config_work_handler);
|
2015-03-05 00:15:30 +00:00
|
|
|
INIT_WORK(&portdev->control_work, &control_work_handler);
|
|
|
|
|
2009-12-21 15:33:25 +00:00
|
|
|
if (multiport) {
|
2013-03-29 11:00:07 +00:00
|
|
|
spin_lock_init(&portdev->c_ivq_lock);
|
2013-03-29 11:00:08 +00:00
|
|
|
spin_lock_init(&portdev->c_ovq_lock);
|
2024-09-16 18:16:44 +00:00
|
|
|
}
|
2009-12-21 15:33:25 +00:00
|
|
|
|
2024-09-16 18:16:44 +00:00
|
|
|
err = init_vqs(portdev);
|
|
|
|
if (err < 0) {
|
|
|
|
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
|
|
|
|
goto free_chrdev;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtio_device_ready(portdev->vdev);
|
|
|
|
|
|
|
|
if (multiport) {
|
virtio_console: allocate inbufs in add_port() only if it is needed
When we hot unplug a virtserialport and then try to hot plug again,
it fails:
(qemu) chardev-add socket,id=serial0,path=/tmp/serial0,server,nowait
(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
chardev=serial0,id=serial0,name=serial0
(qemu) device_del serial0
(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
chardev=serial0,id=serial0,name=serial0
kernel error:
virtio-ports vport2p2: Error allocating inbufs
qemu error:
virtio-serial-bus: Guest failure in adding port 2 for device \
virtio-serial0.0
This happens because buffers for the in_vq are allocated when the port is
added but are not released when the port is unplugged.
They are only released when virtconsole is removed (see a7a69ec0d8e4)
To avoid the problem and to be symmetric, we could allocate all the buffers
in init_vqs() as they are released in remove_vqs(), but it sounds like
a waste of memory.
Rather than that, this patch changes add_port() logic to ignore ENOSPC
error in fill_queue(), which means queue has already been filled.
Fixes: a7a69ec0d8e4 ("virtio_console: free buffers after reset")
Cc: mst@redhat.com
Cc: stable@vger.kernel.org
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2019-11-14 12:25:48 +00:00
|
|
|
err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
|
|
|
|
if (err < 0) {
|
2010-02-12 05:02:17 +00:00
|
|
|
dev_err(&vdev->dev,
|
|
|
|
"Error allocating buffers for control queue\n");
|
2018-04-20 18:00:13 +00:00
|
|
|
/*
|
|
|
|
* The host might want to notify mgmt sw about device
|
|
|
|
* add failure.
|
|
|
|
*/
|
|
|
|
__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
|
|
|
|
VIRTIO_CONSOLE_DEVICE_READY, 0);
|
|
|
|
/* Device was functional: we need full cleanup. */
|
|
|
|
virtcons_remove(vdev);
|
virtio_console: allocate inbufs in add_port() only if it is needed
When we hot unplug a virtserialport and then try to hot plug again,
it fails:
(qemu) chardev-add socket,id=serial0,path=/tmp/serial0,server,nowait
(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
chardev=serial0,id=serial0,name=serial0
(qemu) device_del serial0
(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
chardev=serial0,id=serial0,name=serial0
kernel error:
virtio-ports vport2p2: Error allocating inbufs
qemu error:
virtio-serial-bus: Guest failure in adding port 2 for device \
virtio-serial0.0
This happens because buffers for the in_vq are allocated when the port is
added but are not released when the port is unplugged.
They are only released when virtconsole is removed (see a7a69ec0d8e4)
To avoid the problem and to be symmetric, we could allocate all the buffers
in init_vqs() as they are released in remove_vqs(), but it sounds like
a waste of memory.
Rather than that, this patch changes add_port() logic to ignore ENOSPC
error in fill_queue(), which means queue has already been filled.
Fixes: a7a69ec0d8e4 ("virtio_console: free buffers after reset")
Cc: mst@redhat.com
Cc: stable@vger.kernel.org
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2019-11-14 12:25:48 +00:00
|
|
|
return err;
|
2010-02-12 05:02:17 +00:00
|
|
|
}
|
2010-05-20 04:15:49 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* For backward compatibility: Create a console port
|
|
|
|
* if we're running on older host.
|
|
|
|
*/
|
|
|
|
add_port(portdev, 0);
|
2009-12-21 15:33:25 +00:00
|
|
|
}
|
|
|
|
|
2010-09-02 12:41:49 +00:00
|
|
|
spin_lock_irq(&pdrvdata_lock);
|
|
|
|
list_add_tail(&portdev->list, &pdrvdata.portdevs);
|
|
|
|
spin_unlock_irq(&pdrvdata_lock);
|
|
|
|
|
2010-05-20 04:15:48 +00:00
|
|
|
__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
|
|
|
|
VIRTIO_CONSOLE_DEVICE_READY, 1);
|
2011-09-22 18:14:23 +00:00
|
|
|
|
2007-10-22 01:03:39 +00:00
|
|
|
return 0;
|
|
|
|
|
2009-12-21 16:06:04 +00:00
|
|
|
free_chrdev:
|
|
|
|
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
|
2007-10-22 01:03:39 +00:00
|
|
|
free:
|
2010-01-18 13:45:07 +00:00
|
|
|
kfree(portdev);
|
2007-10-22 01:03:39 +00:00
|
|
|
fail:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-07-01 20:09:50 +00:00
|
|
|
static const struct virtio_device_id id_table[] = {
|
2007-10-22 01:03:39 +00:00
|
|
|
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
|
|
|
|
{ 0 },
|
|
|
|
};
|
2020-06-23 11:09:33 +00:00
|
|
|
MODULE_DEVICE_TABLE(virtio, id_table);
|
2007-10-22 01:03:39 +00:00
|
|
|
|
2020-07-01 20:09:50 +00:00
|
|
|
static const unsigned int features[] = {
|
2008-11-25 12:36:26 +00:00
|
|
|
VIRTIO_CONSOLE_F_SIZE,
|
2010-05-20 04:15:46 +00:00
|
|
|
VIRTIO_CONSOLE_F_MULTIPORT,
|
2008-11-25 12:36:26 +00:00
|
|
|
};
|
|
|
|
|
2020-07-01 20:09:50 +00:00
|
|
|
static const struct virtio_device_id rproc_serial_id_table[] = {
|
2012-12-14 04:10:51 +00:00
|
|
|
#if IS_ENABLED(CONFIG_REMOTEPROC)
|
|
|
|
{ VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
|
|
|
|
#endif
|
|
|
|
{ 0 },
|
|
|
|
};
|
2020-06-23 11:09:33 +00:00
|
|
|
MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);
|
2012-12-14 04:10:51 +00:00
|
|
|
|
2020-07-01 20:09:50 +00:00
|
|
|
static const unsigned int rproc_serial_features[] = {
|
2012-12-14 04:10:51 +00:00
|
|
|
};
|
|
|
|
|
2013-09-16 23:55:23 +00:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
2011-12-22 11:28:28 +00:00
|
|
|
static int virtcons_freeze(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
struct ports_device *portdev;
|
|
|
|
struct port *port;
|
|
|
|
|
|
|
|
portdev = vdev->priv;
|
|
|
|
|
2021-10-13 10:55:44 +00:00
|
|
|
virtio_reset_device(vdev);
|
2011-12-22 11:28:28 +00:00
|
|
|
|
2017-03-29 20:22:04 +00:00
|
|
|
if (use_multiport(portdev))
|
|
|
|
virtqueue_disable_cb(portdev->c_ivq);
|
2011-12-22 11:28:28 +00:00
|
|
|
cancel_work_sync(&portdev->control_work);
|
2015-03-05 00:15:49 +00:00
|
|
|
cancel_work_sync(&portdev->config_work);
|
2012-01-06 10:49:08 +00:00
|
|
|
/*
|
|
|
|
* Once more: if control_work_handler() was running, it would
|
|
|
|
* enable the cb as the last step.
|
|
|
|
*/
|
2017-03-29 20:22:04 +00:00
|
|
|
if (use_multiport(portdev))
|
|
|
|
virtqueue_disable_cb(portdev->c_ivq);
|
2011-12-22 11:28:28 +00:00
|
|
|
|
|
|
|
list_for_each_entry(port, &portdev->ports, list) {
|
2012-01-06 10:49:08 +00:00
|
|
|
virtqueue_disable_cb(port->in_vq);
|
|
|
|
virtqueue_disable_cb(port->out_vq);
|
2011-12-22 11:28:28 +00:00
|
|
|
/*
|
|
|
|
* We'll ask the host later if the new invocation has
|
|
|
|
* the port opened or closed.
|
|
|
|
*/
|
|
|
|
port->host_connected = false;
|
|
|
|
remove_port_data(port);
|
|
|
|
}
|
|
|
|
remove_vqs(portdev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int virtcons_restore(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
struct ports_device *portdev;
|
|
|
|
struct port *port;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
portdev = vdev->priv;
|
|
|
|
|
|
|
|
ret = init_vqs(portdev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2014-10-14 23:52:32 +00:00
|
|
|
virtio_device_ready(portdev->vdev);
|
|
|
|
|
2011-12-22 11:28:28 +00:00
|
|
|
if (use_multiport(portdev))
|
2013-03-29 11:00:07 +00:00
|
|
|
fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
|
2011-12-22 11:28:28 +00:00
|
|
|
|
|
|
|
list_for_each_entry(port, &portdev->ports, list) {
|
|
|
|
port->in_vq = portdev->in_vqs[port->id];
|
|
|
|
port->out_vq = portdev->out_vqs[port->id];
|
|
|
|
|
|
|
|
fill_queue(port->in_vq, &port->inbuf_lock);
|
|
|
|
|
|
|
|
/* Get port open/close status on the host */
|
|
|
|
send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
|
2012-04-25 09:10:39 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If a port was open at the time of suspending, we
|
|
|
|
* have to let the host know that it's still open.
|
|
|
|
*/
|
|
|
|
if (port->guest_connected)
|
|
|
|
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
|
2011-12-22 11:28:28 +00:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2007-10-22 01:03:39 +00:00
|
|
|
static struct virtio_driver virtio_console = {
|
2008-11-25 12:36:26 +00:00
|
|
|
.feature_table = features,
|
|
|
|
.feature_table_size = ARRAY_SIZE(features),
|
2007-10-22 01:03:39 +00:00
|
|
|
.driver.name = KBUILD_MODNAME,
|
|
|
|
.id_table = id_table,
|
|
|
|
.probe = virtcons_probe,
|
2010-02-12 05:02:16 +00:00
|
|
|
.remove = virtcons_remove,
|
2009-12-21 16:52:08 +00:00
|
|
|
.config_changed = config_intr,
|
2013-09-16 23:55:23 +00:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
2011-12-22 11:28:28 +00:00
|
|
|
.freeze = virtcons_freeze,
|
|
|
|
.restore = virtcons_restore,
|
|
|
|
#endif
|
2007-10-22 01:03:39 +00:00
|
|
|
};
|
|
|
|
|
2012-12-21 23:12:08 +00:00
|
|
|
static struct virtio_driver virtio_rproc_serial = {
|
2012-12-14 04:10:51 +00:00
|
|
|
.feature_table = rproc_serial_features,
|
|
|
|
.feature_table_size = ARRAY_SIZE(rproc_serial_features),
|
|
|
|
.driver.name = "virtio_rproc_serial",
|
|
|
|
.id_table = rproc_serial_id_table,
|
|
|
|
.probe = virtcons_probe,
|
|
|
|
.remove = virtcons_remove,
|
|
|
|
};
|
|
|
|
|
2022-03-16 19:20:03 +00:00
|
|
|
static int __init virtio_console_init(void)
|
2007-10-22 01:03:39 +00:00
|
|
|
{
|
2009-12-21 16:06:04 +00:00
|
|
|
int err;
|
|
|
|
|
2023-06-20 14:37:58 +00:00
|
|
|
err = class_register(&port_class);
|
|
|
|
if (err)
|
2009-12-21 16:06:04 +00:00
|
|
|
return err;
|
2009-12-21 17:06:21 +00:00
|
|
|
|
|
|
|
pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
|
2010-01-18 13:45:05 +00:00
|
|
|
INIT_LIST_HEAD(&pdrvdata.consoles);
|
2010-09-02 12:41:49 +00:00
|
|
|
INIT_LIST_HEAD(&pdrvdata.portdevs);
|
2010-01-18 13:45:05 +00:00
|
|
|
|
2012-09-01 19:49:37 +00:00
|
|
|
err = register_virtio_driver(&virtio_console);
|
|
|
|
if (err < 0) {
|
|
|
|
pr_err("Error %d registering virtio driver\n", err);
|
|
|
|
goto free;
|
|
|
|
}
|
2012-12-14 04:10:51 +00:00
|
|
|
err = register_virtio_driver(&virtio_rproc_serial);
|
|
|
|
if (err < 0) {
|
|
|
|
pr_err("Error %d registering virtio rproc serial driver\n",
|
|
|
|
err);
|
|
|
|
goto unregister;
|
|
|
|
}
|
2012-09-01 19:49:37 +00:00
|
|
|
return 0;
|
2012-12-14 04:10:51 +00:00
|
|
|
unregister:
|
|
|
|
unregister_virtio_driver(&virtio_console);
|
2012-09-01 19:49:37 +00:00
|
|
|
free:
|
2014-07-26 22:00:01 +00:00
|
|
|
debugfs_remove_recursive(pdrvdata.debugfs_dir);
|
2023-06-20 14:37:58 +00:00
|
|
|
class_unregister(&port_class);
|
2012-09-01 19:49:37 +00:00
|
|
|
return err;
|
2007-10-22 01:03:39 +00:00
|
|
|
}
|
2010-02-12 05:02:16 +00:00
|
|
|
|
2022-03-16 19:20:03 +00:00
|
|
|
static void __exit virtio_console_fini(void)
|
2010-02-12 05:02:16 +00:00
|
|
|
{
|
2012-12-14 04:10:51 +00:00
|
|
|
reclaim_dma_bufs();
|
|
|
|
|
2010-02-12 05:02:16 +00:00
|
|
|
unregister_virtio_driver(&virtio_console);
|
2012-12-14 04:10:51 +00:00
|
|
|
unregister_virtio_driver(&virtio_rproc_serial);
|
2010-02-12 05:02:16 +00:00
|
|
|
|
2023-06-20 14:37:58 +00:00
|
|
|
class_unregister(&port_class);
|
2014-07-26 22:00:01 +00:00
|
|
|
debugfs_remove_recursive(pdrvdata.debugfs_dir);
|
2010-02-12 05:02:16 +00:00
|
|
|
}
|
2022-03-16 19:20:03 +00:00
|
|
|
module_init(virtio_console_init);
|
|
|
|
module_exit(virtio_console_fini);
|
2007-10-22 01:03:39 +00:00
|
|
|
|
|
|
|
MODULE_DESCRIPTION("Virtio console driver");
|
|
|
|
MODULE_LICENSE("GPL");
|