linux-stable/drivers/rpmsg/mtk_rpmsg.c
AngeloGioacchino Del Regno 353d921468 rpmsg: mtk_rpmsg: Fix circular locking dependency
During execution of the worker that's used to register rpmsg devices
we safely lock the channels mutex but, when creating a new
endpoint for such devices, we register an IPI on the SCP, which
then makes the SCP trigger an interrupt, lock its own mutex and in
turn register more subdevices.
This creates a circular locking dependency, as the mtk_rpmsg
channels_lock will then depend on the SCP IPI lock.

[   15.447736] ======================================================
[   15.460158] WARNING: possible circular locking dependency detected
[   15.460161] 5.17.0-next-20220324+ #399 Not tainted
[   15.460165] ------------------------------------------------------
[   15.460166] kworker/0:3/155 is trying to acquire lock:
[   15.460170] ffff5b4d0eaf1308 (&scp->ipi_desc[i].lock){+.+.}-{4:4}, at: scp_ipi_lock+0x34/0x50 [mtk_scp_ipi]
[   15.504958]
[]                but task is already holding lock:
[   15.504960] ffff5b4d0e8f1918 (&mtk_subdev->channels_lock){+.+.}-{4:4}, at: mtk_register_device_work_function+0x50/0x1cc [mtk_rpmsg]
[   15.504978]
[]                which lock already depends on the new lock.

[   15.504980]
[]                the existing dependency chain (in reverse order) is:
[   15.504982]
[]               -> #1 (&mtk_subdev->channels_lock){+.+.}-{4:4}:
[   15.504990]        lock_acquire+0x68/0x84
[   15.504999]        __mutex_lock+0xa4/0x3e0
[   15.505007]        mutex_lock_nested+0x40/0x70
[   15.505012]        mtk_rpmsg_ns_cb+0xe4/0x134 [mtk_rpmsg]
[   15.641684]        mtk_rpmsg_ipi_handler+0x38/0x64 [mtk_rpmsg]
[   15.641693]        scp_ipi_handler+0xbc/0x180 [mtk_scp]
[   15.663905]        mt8192_scp_irq_handler+0x44/0xa4 [mtk_scp]
[   15.663915]        scp_irq_handler+0x6c/0xa0 [mtk_scp]
[   15.685779]        irq_thread_fn+0x34/0xa0
[   15.685785]        irq_thread+0x18c/0x240
[   15.685789]        kthread+0x104/0x110
[   15.709579]        ret_from_fork+0x10/0x20
[   15.709586]
[]               -> #0 (&scp->ipi_desc[i].lock){+.+.}-{4:4}:
[   15.731271]        __lock_acquire+0x11e4/0x1910
[   15.740367]        lock_acquire.part.0+0xd8/0x220
[   15.749813]        lock_acquire+0x68/0x84
[   15.757861]        __mutex_lock+0xa4/0x3e0
[   15.766084]        mutex_lock_nested+0x40/0x70
[   15.775006]        scp_ipi_lock+0x34/0x50 [mtk_scp_ipi]
[   15.785503]        scp_ipi_register+0x40/0xa4 [mtk_scp_ipi]
[   15.796697]        scp_register_ipi+0x1c/0x30 [mtk_scp]
[   15.807194]        mtk_rpmsg_create_ept+0xa0/0x108 [mtk_rpmsg]
[   15.818912]        rpmsg_create_ept+0x44/0x60
[   15.827660]        cros_ec_rpmsg_probe+0x15c/0x1f0
[   15.837282]        rpmsg_dev_probe+0x128/0x1d0
[   15.846203]        really_probe.part.0+0xa4/0x2a0
[   15.855649]        __driver_probe_device+0xa0/0x150
[   15.865443]        driver_probe_device+0x48/0x150
[   15.877157]        __device_attach_driver+0xc0/0x12c
[   15.889359]        bus_for_each_drv+0x80/0xe0
[   15.900330]        __device_attach+0xe4/0x190
[   15.911303]        device_initial_probe+0x1c/0x2c
[   15.922969]        bus_probe_device+0xa8/0xb0
[   15.933927]        device_add+0x3a8/0x8a0
[   15.944193]        device_register+0x28/0x40
[   15.954970]        rpmsg_register_device+0x5c/0xa0
[   15.966782]        mtk_register_device_work_function+0x148/0x1cc [mtk_rpmsg]
[   15.983146]        process_one_work+0x294/0x664
[   15.994458]        worker_thread+0x7c/0x45c
[   16.005069]        kthread+0x104/0x110
[   16.014789]        ret_from_fork+0x10/0x20
[   16.025201]
[]               other info that might help us debug this:

[   16.047769]  Possible unsafe locking scenario:

[   16.063942]        CPU0                    CPU1
[   16.075166]        ----                    ----
[   16.086376]   lock(&mtk_subdev->channels_lock);
[   16.097592]                                lock(&scp->ipi_desc[i].lock);
[   16.113188]                                lock(&mtk_subdev->channels_lock);
[   16.129482]   lock(&scp->ipi_desc[i].lock);
[   16.140020]
[]                *** DEADLOCK ***

[   16.158282] 4 locks held by kworker/0:3/155:
[   16.168978]  #0: ffff5b4d00008748 ((wq_completion)events){+.+.}-{0:0}, at: process_one_work+0x1fc/0x664
[   16.190017]  #1: ffff80000953bdc8 ((work_completion)(&mtk_subdev->register_work)){+.+.}-{0:0}, at: process_one_work+0x1fc/0x664
[   16.215269]  #2: ffff5b4d0e8f1918 (&mtk_subdev->channels_lock){+.+.}-{4:4}, at: mtk_register_device_work_function+0x50/0x1cc [mtk_rpmsg]
[   16.242131]  #3: ffff5b4d05964190 (&dev->mutex){....}-{4:4}, at: __device_attach+0x44/0x190

To solve this, simply unlock the channels_lock mutex before calling
mtk_rpmsg_register_device() and relock it right after: safety is
still ensured, because the SCP's own locking is taken immediately
afterwards and serializes the IPI registration.

Fixes: 7017996951 ("rpmsg: add rpmsg support for mt8183 SCP.")
Signed-off-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Link: https://lore.kernel.org/r/20220525091201.14210-1-angelogioacchino.delregno@collabora.com
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
2022-06-14 16:41:10 -06:00

411 lines
10 KiB
C

// SPDX-License-Identifier: GPL-2.0
//
// Copyright 2019 Google LLC.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/rpmsg/mtk_rpmsg.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "rpmsg_internal.h"
/*
 * struct mtk_rpmsg_rproc_subdev - rpmsg subdevice bound to one remote proc
 * @pdev: platform device of the remote processor (e.g. the SCP)
 * @info: platform callbacks used to register/unregister/send IPIs
 * @ns_ept: dedicated name-service endpoint, or NULL when not created
 * @subdev: rproc_subdev hooked into the remoteproc lifecycle
 * @register_work: worker that registers rpmsg devices for announced channels
 * @channels: list of struct mtk_rpmsg_channel_info announced by the remote
 * @channels_lock: protects @channels and each entry's ->registered flag
 */
struct mtk_rpmsg_rproc_subdev {
	struct platform_device *pdev;
	struct mtk_rpmsg_info *info;
	struct rpmsg_endpoint *ns_ept;
	struct rproc_subdev subdev;
	struct work_struct register_work;
	struct list_head channels;
	struct mutex channels_lock;
};

#define to_mtk_subdev(d) container_of(d, struct mtk_rpmsg_rproc_subdev, subdev)
/*
 * struct mtk_rpmsg_channel_info - one channel announced by the remote
 * @info: rpmsg channel name/src/dst
 * @registered: true once an rpmsg device has been created for this channel
 * @list: node in mtk_rpmsg_rproc_subdev.channels
 */
struct mtk_rpmsg_channel_info {
	struct rpmsg_channel_info info;
	bool registered;
	struct list_head list;
};
/**
 * struct rpmsg_ns_msg - dynamic name service announcement message
 * @name: name of remote service that is published
 * @addr: address of remote service that is published
 *
 * This message is sent across to publish a new service. When we receive these
 * messages, an appropriate rpmsg channel (i.e device) is created. In turn, the
 * ->probe() handler of the appropriate rpmsg driver will be invoked
 * (if/as-soon-as one is registered).
 */
struct rpmsg_ns_msg {
	char name[RPMSG_NAME_SIZE];
	u32 addr;
} __packed;
/* rpmsg device wrapper: keeps a backpointer to the owning subdevice. */
struct mtk_rpmsg_device {
	struct rpmsg_device rpdev;
	struct mtk_rpmsg_rproc_subdev *mtk_subdev;
};
/* rpmsg endpoint wrapper: keeps a backpointer to the owning subdevice. */
struct mtk_rpmsg_endpoint {
	struct rpmsg_endpoint ept;
	struct mtk_rpmsg_rproc_subdev *mtk_subdev;
};

#define to_mtk_rpmsg_device(r) container_of(r, struct mtk_rpmsg_device, rpdev)
#define to_mtk_rpmsg_endpoint(r) container_of(r, struct mtk_rpmsg_endpoint, ept)

/* Defined below; needed by __mtk_create_ept() before its definition. */
static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops;
/* Final kref release: free the mtk_rpmsg_endpoint containing @kref. */
static void __mtk_ept_release(struct kref *kref)
{
	struct rpmsg_endpoint *ept;

	ept = container_of(kref, struct rpmsg_endpoint, refcount);
	kfree(to_mtk_rpmsg_endpoint(ept));
}
/*
 * IPI receive path: forward the payload to the endpoint's rpmsg callback.
 * @priv is the struct mtk_rpmsg_endpoint registered in __mtk_create_ept().
 */
static void mtk_rpmsg_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct mtk_rpmsg_endpoint *mept = priv;
	struct rpmsg_endpoint *ept = &mept->ept;
	int err;

	err = ept->cb(ept->rpdev, data, len, ept->priv, ept->addr);
	if (err)
		dev_warn(&ept->rpdev->dev, "rpmsg handler return error = %d",
			 err);
}
/*
 * Allocate an endpoint and register an IPI handler for @id on the remote.
 * Returns the embedded rpmsg_endpoint, or NULL on allocation/registration
 * failure. The initial kref is dropped (freeing the endpoint) on failure.
 */
static struct rpmsg_endpoint *
__mtk_create_ept(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
		 struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
		 u32 id)
{
	struct platform_device *pdev = mtk_subdev->pdev;
	struct mtk_rpmsg_endpoint *mept;
	struct rpmsg_endpoint *ept;

	mept = kzalloc(sizeof(*mept), GFP_KERNEL);
	if (!mept)
		return NULL;

	mept->mtk_subdev = mtk_subdev;

	ept = &mept->ept;
	kref_init(&ept->refcount);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &mtk_rpmsg_endpoint_ops;
	ept->addr = id;

	if (mtk_subdev->info->register_ipi(pdev, id, mtk_rpmsg_ipi_handler,
					   mept)) {
		dev_err(&pdev->dev, "IPI register failed, id = %d", id);
		kref_put(&ept->refcount, __mtk_ept_release);
		return NULL;
	}

	return ept;
}
/* rpmsg_device_ops .create_ept: the endpoint address is taken from chinfo.src. */
static struct rpmsg_endpoint *
mtk_rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv,
		     struct rpmsg_channel_info chinfo)
{
	struct mtk_rpmsg_device *mdev = to_mtk_rpmsg_device(rpdev);

	return __mtk_create_ept(mdev->mtk_subdev, rpdev, cb, priv, chinfo.src);
}
static void mtk_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
struct mtk_rpmsg_rproc_subdev *mtk_subdev =
to_mtk_rpmsg_endpoint(ept)->mtk_subdev;
mtk_subdev->info->unregister_ipi(mtk_subdev->pdev, ept->addr);
kref_put(&ept->refcount, __mtk_ept_release);
}
/* rpmsg_endpoint_ops .send: forward the buffer as an IPI to the remote. */
static int mtk_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct mtk_rpmsg_rproc_subdev *mtk_subdev;

	mtk_subdev = to_mtk_rpmsg_endpoint(ept)->mtk_subdev;

	return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
					  len, 0);
}
/* rpmsg_endpoint_ops .trysend. */
static int mtk_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct mtk_rpmsg_rproc_subdev *mtk_subdev;

	mtk_subdev = to_mtk_rpmsg_endpoint(ept)->mtk_subdev;

	/*
	 * TODO: currently identical to mtk_rpmsg_send(), i.e. it still waits
	 * until the SCP has received the last command.
	 */
	return mtk_subdev->info->send_ipi(mtk_subdev->pdev, ept->addr, data,
					  len, 0);
}
/* Endpoint ops; endpoint creation lives in mtk_rpmsg_device_ops instead. */
static const struct rpmsg_endpoint_ops mtk_rpmsg_endpoint_ops = {
	.destroy_ept = mtk_rpmsg_destroy_ept,
	.send = mtk_rpmsg_send,
	.trysend = mtk_rpmsg_trysend,
};
/*
 * dev->release callback: frees the struct mtk_rpmsg_device allocated in
 * mtk_rpmsg_register_device() once the device's last reference is gone.
 */
static void mtk_rpmsg_release_device(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);

	kfree(to_mtk_rpmsg_device(rpdev));
}
/* Device ops installed on every rpmsg device this driver registers. */
static const struct rpmsg_device_ops mtk_rpmsg_device_ops = {
	.create_ept = mtk_rpmsg_create_ept,
};
/*
 * Find the DT child of @node whose "mediatek,rpmsg-name" property equals
 * @channel; returns NULL when no child matches.
 */
static struct device_node *
mtk_rpmsg_match_device_subnode(struct device_node *node, const char *channel)
{
	struct device_node *child;
	const char *rpmsg_name;

	for_each_available_child_of_node(node, child) {
		if (of_property_read_string(child, "mediatek,rpmsg-name",
					    &rpmsg_name))
			continue;

		if (!strcmp(rpmsg_name, channel))
			return child;
	}

	return NULL;
}
/*
 * Allocate and register an rpmsg device for the announced channel @info.
 * The wrapper is freed by mtk_rpmsg_release_device() via dev->release.
 */
static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
				     struct rpmsg_channel_info *info)
{
	struct platform_device *pdev = mtk_subdev->pdev;
	struct mtk_rpmsg_device *mdev;
	struct rpmsg_device *rpdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->mtk_subdev = mtk_subdev;

	rpdev = &mdev->rpdev;
	rpdev->ops = &mtk_rpmsg_device_ops;
	rpdev->src = info->src;
	rpdev->dst = info->dst;
	strscpy(rpdev->id.name, info->name, RPMSG_NAME_SIZE);

	/* Bind the matching DT subnode (if any) so drivers can parse it. */
	rpdev->dev.of_node =
		mtk_rpmsg_match_device_subnode(pdev->dev.of_node, info->name);
	rpdev->dev.parent = &pdev->dev;
	rpdev->dev.release = mtk_rpmsg_release_device;

	return rpmsg_register_device(rpdev);
}
/*
 * Worker that registers an rpmsg device for every announced channel that has
 * not been registered yet. Scheduled by mtk_rpmsg_create_device() whenever a
 * new channel is added to the list.
 */
static void mtk_register_device_work_function(struct work_struct *register_work)
{
	struct mtk_rpmsg_rproc_subdev *subdev = container_of(
		register_work, struct mtk_rpmsg_rproc_subdev, register_work);
	struct platform_device *pdev = subdev->pdev;
	struct mtk_rpmsg_channel_info *info;
	int ret;

	mutex_lock(&subdev->channels_lock);
	list_for_each_entry(info, &subdev->channels, list) {
		if (info->registered)
			continue;

		/*
		 * Drop channels_lock around device registration: probing the
		 * new device can create an endpoint, which registers an IPI
		 * on the SCP and takes the SCP's IPI lock, while the SCP IPI
		 * handler (mtk_rpmsg_ns_cb -> mtk_rpmsg_create_device) takes
		 * channels_lock — holding it here would create a circular
		 * locking dependency (see the lockdep report in the commit
		 * message above).
		 */
		mutex_unlock(&subdev->channels_lock);
		ret = mtk_rpmsg_register_device(subdev, &info->info);
		mutex_lock(&subdev->channels_lock);
		if (ret) {
			dev_err(&pdev->dev, "Can't create rpmsg_device\n");
			continue;
		}

		info->registered = true;
	}
	mutex_unlock(&subdev->channels_lock);
}
/*
 * Queue a newly announced channel (@name/@addr) and kick the registration
 * worker; the rpmsg device itself is created asynchronously.
 */
static int mtk_rpmsg_create_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
				   char *name, u32 addr)
{
	struct mtk_rpmsg_channel_info *chan;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	strscpy(chan->info.name, name, RPMSG_NAME_SIZE);
	chan->info.src = addr;
	chan->info.dst = RPMSG_ADDR_ANY;

	mutex_lock(&mtk_subdev->channels_lock);
	list_add(&chan->list, &mtk_subdev->channels);
	mutex_unlock(&mtk_subdev->channels_lock);

	schedule_work(&mtk_subdev->register_work);
	return 0;
}
/*
 * Name service callback: the remote announced a new channel; validate the
 * message and queue the channel for registration.
 */
static int mtk_rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
			   void *priv, u32 src)
{
	struct mtk_rpmsg_rproc_subdev *mtk_subdev = priv;
	struct device *dev = &mtk_subdev->pdev->dev;
	struct rpmsg_ns_msg *msg = data;
	int ret;

	if (len != sizeof(*msg)) {
		dev_err(dev, "malformed ns msg (%d)\n", len);
		return -EINVAL;
	}

	/*
	 * The name service endpoint does _not_ belong to a real rpmsg channel
	 * and is handled by the rpmsg bus itself, so a non-NULL rpdev here
	 * indicates something went wrong.
	 */
	if (rpdev) {
		dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
		return -EINVAL;
	}

	/* don't trust the remote processor for null terminating the name */
	msg->name[RPMSG_NAME_SIZE - 1] = '\0';

	dev_info(dev, "creating channel %s addr 0x%x\n", msg->name, msg->addr);

	ret = mtk_rpmsg_create_device(mtk_subdev, msg->name, msg->addr);
	if (ret)
		dev_err(dev, "create rpmsg device failed\n");

	return ret;
}
/*
 * rproc_subdev .prepare: create the dedicated name service endpoint, if the
 * platform configured an IPI id for it.
 */
static int mtk_rpmsg_prepare(struct rproc_subdev *subdev)
{
	struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);

	/* A negative ns_ipi_id means no name service endpoint is wanted. */
	if (mtk_subdev->info->ns_ipi_id < 0)
		return 0;

	mtk_subdev->ns_ept = __mtk_create_ept(mtk_subdev, NULL, mtk_rpmsg_ns_cb,
					      mtk_subdev,
					      mtk_subdev->info->ns_ipi_id);
	if (!mtk_subdev->ns_ept) {
		dev_err(&mtk_subdev->pdev->dev,
			"failed to create name service endpoint\n");
		return -ENOMEM;
	}

	return 0;
}
/* rproc_subdev .unprepare: tear down the name service endpoint, if any. */
static void mtk_rpmsg_unprepare(struct rproc_subdev *subdev)
{
	struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
	struct rpmsg_endpoint *ns_ept = mtk_subdev->ns_ept;

	if (ns_ept) {
		mtk_rpmsg_destroy_ept(ns_ept);
		mtk_subdev->ns_ept = NULL;
	}
}
/*
 * rproc_subdev .stop: unregister every rpmsg device created for this remote
 * and free the channel bookkeeping.
 */
static void mtk_rpmsg_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct mtk_rpmsg_channel_info *info, *next;
	struct mtk_rpmsg_rproc_subdev *mtk_subdev = to_mtk_subdev(subdev);
	struct device *dev = &mtk_subdev->pdev->dev;

	/*
	 * Destroy the name service endpoint here, to avoid new channel being
	 * created after the rpmsg_unregister_device loop below.
	 */
	if (mtk_subdev->ns_ept) {
		mtk_rpmsg_destroy_ept(mtk_subdev->ns_ept);
		mtk_subdev->ns_ept = NULL;
	}

	/* Make sure no registration worker is still touching the list. */
	cancel_work_sync(&mtk_subdev->register_work);

	mutex_lock(&mtk_subdev->channels_lock);
	/* First pass: unregister every device that was actually created. */
	list_for_each_entry(info, &mtk_subdev->channels, list) {
		if (!info->registered)
			continue;
		if (rpmsg_unregister_device(dev, &info->info)) {
			dev_warn(
				dev,
				"rpmsg_unregister_device failed for %s.%d.%d\n",
				info->info.name, info->info.src,
				info->info.dst);
		}
	}

	/* Second pass: free all entries (safe variant — we delete as we go). */
	list_for_each_entry_safe(info, next,
				 &mtk_subdev->channels, list) {
		list_del(&info->list);
		kfree(info);
	}
	mutex_unlock(&mtk_subdev->channels_lock);
}
/*
 * Allocate and initialize the rpmsg subdevice for @pdev, wiring the rproc
 * lifecycle callbacks. Returns the embedded rproc_subdev, or NULL on OOM.
 * Freed by mtk_rpmsg_destroy_rproc_subdev().
 */
struct rproc_subdev *
mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
			      struct mtk_rpmsg_info *info)
{
	struct mtk_rpmsg_rproc_subdev *mtk_subdev;

	mtk_subdev = kzalloc(sizeof(*mtk_subdev), GFP_KERNEL);
	if (!mtk_subdev)
		return NULL;

	mtk_subdev->pdev = pdev;
	mtk_subdev->info = info;
	mtk_subdev->subdev.prepare = mtk_rpmsg_prepare;
	mtk_subdev->subdev.stop = mtk_rpmsg_stop;
	mtk_subdev->subdev.unprepare = mtk_rpmsg_unprepare;

	INIT_LIST_HEAD(&mtk_subdev->channels);
	mutex_init(&mtk_subdev->channels_lock);
	INIT_WORK(&mtk_subdev->register_work,
		  mtk_register_device_work_function);

	return &mtk_subdev->subdev;
}
EXPORT_SYMBOL_GPL(mtk_rpmsg_create_rproc_subdev);
/* Free a subdevice previously created by mtk_rpmsg_create_rproc_subdev(). */
void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev)
{
	kfree(to_mtk_subdev(subdev));
}
EXPORT_SYMBOL_GPL(mtk_rpmsg_destroy_rproc_subdev);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek scp rpmsg driver");