mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-08 15:04:45 +00:00
fce68b0308
This patch adds a new member scheduled in struct mptcp_subflow_context, which will be set in the MPTCP scheduler context when the scheduler picks this subflow to send data. Add a new helper mptcp_subflow_set_scheduled() to set this flag using WRITE_ONCE(). Reviewed-by: Mat Martineau <martineau@kernel.org> Signed-off-by: Geliang Tang <geliang.tang@suse.com> Signed-off-by: Mat Martineau <martineau@kernel.org> Link: https://lore.kernel.org/r/20230821-upstream-net-next-20230818-v1-6-0c860fb256a8@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
96 lines
1.8 KiB
C
96 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Multipath TCP
|
|
*
|
|
* Copyright (c) 2022, SUSE.
|
|
*/
|
|
|
|
#define pr_fmt(fmt) "MPTCP: " fmt
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/list.h>
|
|
#include <linux/rculist.h>
|
|
#include <linux/spinlock.h>
|
|
#include "protocol.h"
|
|
|
|
static DEFINE_SPINLOCK(mptcp_sched_list_lock);
|
|
static LIST_HEAD(mptcp_sched_list);
|
|
|
|
/* Must be called with rcu read lock held */
|
|
struct mptcp_sched_ops *mptcp_sched_find(const char *name)
|
|
{
|
|
struct mptcp_sched_ops *sched, *ret = NULL;
|
|
|
|
list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
|
|
if (!strcmp(sched->name, name)) {
|
|
ret = sched;
|
|
break;
|
|
}
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
|
|
{
|
|
if (!sched->get_subflow)
|
|
return -EINVAL;
|
|
|
|
spin_lock(&mptcp_sched_list_lock);
|
|
if (mptcp_sched_find(sched->name)) {
|
|
spin_unlock(&mptcp_sched_list_lock);
|
|
return -EEXIST;
|
|
}
|
|
list_add_tail_rcu(&sched->list, &mptcp_sched_list);
|
|
spin_unlock(&mptcp_sched_list_lock);
|
|
|
|
pr_debug("%s registered", sched->name);
|
|
return 0;
|
|
}
|
|
|
|
/* Remove @sched from the global scheduler list.
 *
 * RCU removal means concurrent readers traversing the list may still
 * observe the entry until a grace period elapses; the caller must not
 * free @sched before then.
 */
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
{
	spin_lock(&mptcp_sched_list_lock);
	list_del_rcu(&sched->list);
	spin_unlock(&mptcp_sched_list_lock);
}
|
|
|
|
int mptcp_init_sched(struct mptcp_sock *msk,
|
|
struct mptcp_sched_ops *sched)
|
|
{
|
|
if (!sched)
|
|
goto out;
|
|
|
|
if (!bpf_try_module_get(sched, sched->owner))
|
|
return -EBUSY;
|
|
|
|
msk->sched = sched;
|
|
if (msk->sched->init)
|
|
msk->sched->init(msk);
|
|
|
|
pr_debug("sched=%s", msk->sched->name);
|
|
|
|
out:
|
|
return 0;
|
|
}
|
|
|
|
/* Detach and release the scheduler attached to @msk, if any.
 *
 * Drops the module/BPF reference taken by mptcp_init_sched(). Safe to
 * call on an msk with no scheduler attached.
 */
void mptcp_release_sched(struct mptcp_sock *msk)
{
	struct mptcp_sched_ops *sched = msk->sched;

	if (!sched)
		return;

	/* Clear the msk's reference before invoking the release hook, so
	 * the scheduler cannot be reached through msk->sched while it is
	 * tearing down its per-connection state.
	 */
	msk->sched = NULL;
	if (sched->release)
		sched->release(msk);

	bpf_module_put(sched, sched->owner);
}
|
|
|
|
/* Mark (or clear) @subflow as picked by the scheduler to send data.
 *
 * WRITE_ONCE() pairs with lockless READ_ONCE() readers of ->scheduled,
 * preventing store tearing on the flag.
 */
void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
				 bool scheduled)
{
	WRITE_ONCE(subflow->scheduled, scheduled);
}
|