Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 10:45:49 +00:00
Merge tag 'net-6.5-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from BPF, netfilter, bluetooth and CAN.

  Current release - regressions:

   - eth: r8169: multiple fixes for PCIe ASPM-related problems

   - vrf: fix RCU lockdep splat in output path

  Previous releases - regressions:

   - gso: fall back to SW segmenting with GSO_UDP_L4 dodgy bit set

   - dsa: mv88e6xxx: do a final check before timing out when polling

   - nf_tables: fix sleep in atomic in nft_chain_validate

  Previous releases - always broken:

   - sched: fix undoing tcf_bind_filter() in multiple classifiers

   - bpf, arm64: fix BTI type used for freplace attached functions

   - can: gs_usb: fix time stamp counter initialization

   - nft_set_pipapo: fix improper element removal (leading to UAF)

  Misc:

   - net: support STP on bridge in non-root netns, STP prevents packet
     loops so not supporting it results in freezing systems of
     unsuspecting users, and in turn very upset noises being made

   - fix kdoc warnings

   - annotate various bits of TCP state to prevent data races"

* tag 'net-6.5-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (95 commits)
  net: phy: prevent stale pointer dereference in phy_init()
  tcp: annotate data-races around fastopenq.max_qlen
  tcp: annotate data-races around icsk->icsk_user_timeout
  tcp: annotate data-races around tp->notsent_lowat
  tcp: annotate data-races around rskq_defer_accept
  tcp: annotate data-races around tp->linger2
  tcp: annotate data-races around icsk->icsk_syn_retries
  tcp: annotate data-races around tp->keepalive_probes
  tcp: annotate data-races around tp->keepalive_intvl
  tcp: annotate data-races around tp->keepalive_time
  tcp: annotate data-races around tp->tsoffset
  tcp: annotate data-races around tp->tcp_tx_delay
  Bluetooth: MGMT: Use correct address for memcpy()
  Bluetooth: btusb: Fix bluetooth on Intel Macbook 2014
  Bluetooth: SCO: fix sco_conn related locking and validity issues
  Bluetooth: hci_conn: return ERR_PTR instead of NULL when there is no link
  Bluetooth: hci_sync: Avoid use-after-free in dbg for hci_remove_adv_monitor()
  Bluetooth: coredump: fix building with coredump disabled
  Bluetooth: ISO: fix iso_conn related locking and validity issues
  Bluetooth: hci_event: call disconnect callback before deleting conn
  ...
commit 57f1f9dd3a

.mailmap | 3 +
@@ -241,6 +241,7 @@ Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
 Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Crispin <john@phrozen.org> <blogic@openwrt.org>
+John Fastabend <john.fastabend@gmail.com> <john.r.fastabend@intel.com>
 John Keeping <john@keeping.me.uk> <john@metanate.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
@@ -454,6 +455,8 @@ Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
 Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
 Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
+Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
+Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
@@ -322,7 +322,13 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
          *
          */
 
-        emit_bti(A64_BTI_C, ctx);
+        /* bpf function may be invoked by 3 instruction types:
+         * 1. bl, attached via freplace to bpf prog via short jump
+         * 2. br, attached via freplace to bpf prog via long jump
+         * 3. blr, working as a function pointer, used by emit_call.
+         * So BTI_JC should be used here to support both br and blr.
+         */
+        emit_bti(A64_BTI_JC, ctx);
 
         emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
         emit(A64_NOP, ctx);
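Background on the BTI change: with ARMv8.5 Branch Target Identification enforced, an indirect branch must land on a BTI instruction that permits the branch type used. BTI C accepts call-type entries (blr), BTI J accepts jump-type entries (br), and BTI JC accepts both. An freplace trampoline may enter the patched program via a plain br (the long-jump case above), so a prologue starting with BTI C faults; BTI JC covers all three entry paths listed in the comment.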
@@ -4104,6 +4104,7 @@ static int btusb_probe(struct usb_interface *intf,
         BT_DBG("intf %p id %p", intf, id);
 
         if ((id->driver_info & BTUSB_IFNUM_2) &&
+            (intf->cur_altsetting->desc.bInterfaceNumber != 0) &&
             (intf->cur_altsetting->desc.bInterfaceNumber != 2))
                 return -ENODEV;
 
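The BTUSB_IFNUM_2 quirk previously refused to probe any interface other than number 2; the added test also lets interface 0 through, which restores Bluetooth on devices, such as the 2014 Intel MacBook this fix targets, that expose the radio on interface 0.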
@@ -227,6 +227,8 @@ static int
 __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
                           const u8 mode_req, bool nowait)
 {
+        const struct can_bittiming *bt = &priv->can.bittiming;
+        unsigned long timeout_us = MCP251XFD_POLL_TIMEOUT_US;
         u32 con = 0, con_reqop, osc = 0;
         u8 mode;
         int err;
@@ -246,12 +248,16 @@ __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
         if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
                 return 0;
 
+        if (bt->bitrate)
+                timeout_us = max_t(unsigned long, timeout_us,
+                                   MCP251XFD_FRAME_LEN_MAX_BITS * USEC_PER_SEC /
+                                   bt->bitrate);
+
         err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
                                        !mcp251xfd_reg_invalid(con) &&
                                        FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
                                                  con) == mode_req,
-                                       MCP251XFD_POLL_SLEEP_US,
-                                       MCP251XFD_POLL_TIMEOUT_US);
+                                       MCP251XFD_POLL_SLEEP_US, timeout_us);
         if (err != -ETIMEDOUT && err != -EBADMSG)
                 return err;
 
@@ -387,6 +387,7 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
 #define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
 #define MCP251XFD_POLL_SLEEP_US (10)
 #define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+#define MCP251XFD_FRAME_LEN_MAX_BITS (736)
 
 /* Misc */
 #define MCP251XFD_NAPI_WEIGHT 32
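Why the timeout is now scaled: MCP251XFD_FRAME_LEN_MAX_BITS (736) is the worst-case length of a single frame on the wire, and at low bit rates such a frame can hold the bus longer than the old fixed poll timeout. At 125 kbit/s, for example, 736 bits take 736 * 1000000 / 125000 = 5888 us, well past the previous USEC_PER_MSEC (1000 us) limit; max_t() keeps the 1 ms floor for faster buses where the frame time is shorter.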
@@ -303,12 +303,6 @@ struct gs_can {
         struct can_bittiming_const bt_const, data_bt_const;
         unsigned int channel;   /* channel number */
 
-        /* time counter for hardware timestamps */
-        struct cyclecounter cc;
-        struct timecounter tc;
-        spinlock_t tc_lock; /* spinlock to guard access tc->cycle_last */
-        struct delayed_work timestamp;
-
         u32 feature;
         unsigned int hf_size_tx;
 
@@ -325,6 +319,13 @@ struct gs_usb {
         struct gs_can *canch[GS_MAX_INTF];
         struct usb_anchor rx_submitted;
         struct usb_device *udev;
+
+        /* time counter for hardware timestamps */
+        struct cyclecounter cc;
+        struct timecounter tc;
+        spinlock_t tc_lock; /* spinlock to guard access tc->cycle_last */
+        struct delayed_work timestamp;
+
         unsigned int hf_size_rx;
         u8 active_channels;
 };
@@ -388,15 +389,15 @@ static int gs_cmd_reset(struct gs_can *dev)
                                     GFP_KERNEL);
 }
 
-static inline int gs_usb_get_timestamp(const struct gs_can *dev,
+static inline int gs_usb_get_timestamp(const struct gs_usb *parent,
                                        u32 *timestamp_p)
 {
         __le32 timestamp;
         int rc;
 
-        rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_TIMESTAMP,
+        rc = usb_control_msg_recv(parent->udev, 0, GS_USB_BREQ_TIMESTAMP,
                                   USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-                                  dev->channel, 0,
+                                  0, 0,
                                   &timestamp, sizeof(timestamp),
                                   USB_CTRL_GET_TIMEOUT,
                                   GFP_KERNEL);
@@ -410,20 +411,20 @@ static inline int gs_usb_get_timestamp(const struct gs_can *dev,
 
 static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
 {
-        struct gs_can *dev = container_of(cc, struct gs_can, cc);
+        struct gs_usb *parent = container_of(cc, struct gs_usb, cc);
         u32 timestamp = 0;
         int err;
 
-        lockdep_assert_held(&dev->tc_lock);
+        lockdep_assert_held(&parent->tc_lock);
 
         /* drop lock for synchronous USB transfer */
-        spin_unlock_bh(&dev->tc_lock);
-        err = gs_usb_get_timestamp(dev, &timestamp);
-        spin_lock_bh(&dev->tc_lock);
+        spin_unlock_bh(&parent->tc_lock);
+        err = gs_usb_get_timestamp(parent, &timestamp);
+        spin_lock_bh(&parent->tc_lock);
         if (err)
-                netdev_err(dev->netdev,
+                dev_err(&parent->udev->dev,
                            "Error %d while reading timestamp. HW timestamps may be inaccurate.",
                            err);
 
         return timestamp;
 }
@@ -431,14 +432,14 @@ static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev
 static void gs_usb_timestamp_work(struct work_struct *work)
 {
         struct delayed_work *delayed_work = to_delayed_work(work);
-        struct gs_can *dev;
+        struct gs_usb *parent;
 
-        dev = container_of(delayed_work, struct gs_can, timestamp);
-        spin_lock_bh(&dev->tc_lock);
-        timecounter_read(&dev->tc);
-        spin_unlock_bh(&dev->tc_lock);
+        parent = container_of(delayed_work, struct gs_usb, timestamp);
+        spin_lock_bh(&parent->tc_lock);
+        timecounter_read(&parent->tc);
+        spin_unlock_bh(&parent->tc_lock);
 
-        schedule_delayed_work(&dev->timestamp,
+        schedule_delayed_work(&parent->timestamp,
                               GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
 }
 
@@ -446,37 +447,38 @@ static void gs_usb_skb_set_timestamp(struct gs_can *dev,
                                      struct sk_buff *skb, u32 timestamp)
 {
         struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+        struct gs_usb *parent = dev->parent;
         u64 ns;
 
-        spin_lock_bh(&dev->tc_lock);
-        ns = timecounter_cyc2time(&dev->tc, timestamp);
-        spin_unlock_bh(&dev->tc_lock);
+        spin_lock_bh(&parent->tc_lock);
+        ns = timecounter_cyc2time(&parent->tc, timestamp);
+        spin_unlock_bh(&parent->tc_lock);
 
         hwtstamps->hwtstamp = ns_to_ktime(ns);
 }
 
-static void gs_usb_timestamp_init(struct gs_can *dev)
+static void gs_usb_timestamp_init(struct gs_usb *parent)
 {
-        struct cyclecounter *cc = &dev->cc;
+        struct cyclecounter *cc = &parent->cc;
 
         cc->read = gs_usb_timestamp_read;
         cc->mask = CYCLECOUNTER_MASK(32);
         cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ);
         cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift);
 
-        spin_lock_init(&dev->tc_lock);
-        spin_lock_bh(&dev->tc_lock);
-        timecounter_init(&dev->tc, &dev->cc, ktime_get_real_ns());
-        spin_unlock_bh(&dev->tc_lock);
+        spin_lock_init(&parent->tc_lock);
+        spin_lock_bh(&parent->tc_lock);
+        timecounter_init(&parent->tc, &parent->cc, ktime_get_real_ns());
+        spin_unlock_bh(&parent->tc_lock);
 
-        INIT_DELAYED_WORK(&dev->timestamp, gs_usb_timestamp_work);
-        schedule_delayed_work(&dev->timestamp,
+        INIT_DELAYED_WORK(&parent->timestamp, gs_usb_timestamp_work);
+        schedule_delayed_work(&parent->timestamp,
                               GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
 }
 
-static void gs_usb_timestamp_stop(struct gs_can *dev)
+static void gs_usb_timestamp_stop(struct gs_usb *parent)
 {
-        cancel_delayed_work_sync(&dev->timestamp);
+        cancel_delayed_work_sync(&parent->timestamp);
 }
 
 static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
@@ -560,6 +562,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
         if (!netif_device_present(netdev))
                 return;
 
+        if (!netif_running(netdev))
+                goto resubmit_urb;
+
         if (hf->echo_id == -1) { /* normal rx */
                 if (hf->flags & GS_CAN_FLAG_FD) {
                         skb = alloc_canfd_skb(dev->netdev, &cfd);
@@ -833,6 +838,7 @@ static int gs_can_open(struct net_device *netdev)
                 .mode = cpu_to_le32(GS_CAN_MODE_START),
         };
         struct gs_host_frame *hf;
+        struct urb *urb = NULL;
         u32 ctrlmode;
         u32 flags = 0;
         int rc, i;
@@ -855,14 +861,18 @@ static int gs_can_open(struct net_device *netdev)
         }
 
         if (!parent->active_channels) {
+                if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+                        gs_usb_timestamp_init(parent);
+
                 for (i = 0; i < GS_MAX_RX_URBS; i++) {
-                        struct urb *urb;
                         u8 *buf;
 
                         /* alloc rx urb */
                         urb = usb_alloc_urb(0, GFP_KERNEL);
-                        if (!urb)
-                                return -ENOMEM;
+                        if (!urb) {
+                                rc = -ENOMEM;
+                                goto out_usb_kill_anchored_urbs;
+                        }
 
                         /* alloc rx buffer */
                         buf = kmalloc(dev->parent->hf_size_rx,
@@ -870,8 +880,8 @@ static int gs_can_open(struct net_device *netdev)
                         if (!buf) {
                                 netdev_err(netdev,
                                            "No memory left for USB buffer\n");
-                                usb_free_urb(urb);
-                                return -ENOMEM;
+                                rc = -ENOMEM;
+                                goto out_usb_free_urb;
                         }
 
                         /* fill, anchor, and submit rx urb */
@@ -894,9 +904,7 @@ static int gs_can_open(struct net_device *netdev)
                                 netdev_err(netdev,
                                            "usb_submit failed (err=%d)\n", rc);
 
-                                usb_unanchor_urb(urb);
-                                usb_free_urb(urb);
-                                break;
+                                goto out_usb_unanchor_urb;
                         }
 
                         /* Drop reference,
@@ -926,13 +934,9 @@ static int gs_can_open(struct net_device *netdev)
                 flags |= GS_CAN_MODE_FD;
 
         /* if hardware supports timestamps, enable it */
-        if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) {
+        if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
                 flags |= GS_CAN_MODE_HW_TIMESTAMP;
 
-                /* start polling timestamp */
-                gs_usb_timestamp_init(dev);
-        }
-
         /* finally start device */
         dev->can.state = CAN_STATE_ERROR_ACTIVE;
         dm.flags = cpu_to_le32(flags);
@@ -942,10 +946,9 @@ static int gs_can_open(struct net_device *netdev)
                                GFP_KERNEL);
         if (rc) {
                 netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
-                if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
-                        gs_usb_timestamp_stop(dev);
                 dev->can.state = CAN_STATE_STOPPED;
-                return rc;
+
+                goto out_usb_kill_anchored_urbs;
         }
 
         parent->active_channels++;
@@ -953,6 +956,22 @@ static int gs_can_open(struct net_device *netdev)
                 netif_start_queue(netdev);
 
         return 0;
+
+out_usb_unanchor_urb:
+        usb_unanchor_urb(urb);
+out_usb_free_urb:
+        usb_free_urb(urb);
+out_usb_kill_anchored_urbs:
+        if (!parent->active_channels) {
+                usb_kill_anchored_urbs(&dev->tx_submitted);
+
+                if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+                        gs_usb_timestamp_stop(parent);
+        }
+
+        close_candev(netdev);
+
+        return rc;
 }
 
 static int gs_usb_get_state(const struct net_device *netdev,
@@ -998,14 +1017,13 @@ static int gs_can_close(struct net_device *netdev)
 
         netif_stop_queue(netdev);
 
-        /* stop polling timestamp */
-        if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
-                gs_usb_timestamp_stop(dev);
-
         /* Stop polling */
         parent->active_channels--;
         if (!parent->active_channels) {
                 usb_kill_anchored_urbs(&parent->rx_submitted);
+
+                if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+                        gs_usb_timestamp_stop(parent);
         }
 
         /* Stop sending URBs */
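The structural point of the gs_usb series: the cycle counter, time counter, lock, and refresh worker move from the per-channel struct gs_can into the per-device struct gs_usb, since the hardware timestamp counter belongs to the USB device and is shared by all of its CAN channels. The counter is therefore initialized once, when the first channel is opened (the !parent->active_channels branch), and stopped when the last channel closes, and the GS_USB_BREQ_TIMESTAMP control read now targets the device (wIndex 0) instead of a channel. The gs_can_open() failure paths are also reworked into goto-style unwind labels so partially submitted RX URBs and the timestamp worker are cleaned up instead of leaked.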
@@ -506,7 +506,13 @@ static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
                     (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
                         shifts[STATIC_MAC_FWD_PORTS];
         alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
-        data_hi >>= 1;
+
+        /* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
+         * STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
+         * static MAC table compared to doing write.
+         */
+        if (ksz_is_ksz87xx(dev))
+                data_hi >>= 1;
         alu->is_static = true;
         alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
         alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
@@ -331,13 +331,13 @@ static const u32 ksz8795_masks[] = {
         [STATIC_MAC_TABLE_VALID] = BIT(21),
         [STATIC_MAC_TABLE_USE_FID] = BIT(23),
         [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
-        [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
-        [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
+        [STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
+        [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
         [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
-        [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
+        [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
         [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
         [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
-        [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
+        [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
         [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
         [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
         [P_MII_TX_FLOW_CTRL] = BIT(5),
@@ -601,6 +601,13 @@ static inline void ksz_regmap_unlock(void *__mtx)
         mutex_unlock(mtx);
 }
 
+static inline bool ksz_is_ksz87xx(struct ksz_device *dev)
+{
+        return dev->chip_id == KSZ8795_CHIP_ID ||
+               dev->chip_id == KSZ8794_CHIP_ID ||
+               dev->chip_id == KSZ8765_CHIP_ID;
+}
+
 static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
 {
         return dev->chip_id == KSZ8830_CHIP_ID;
@@ -109,6 +109,13 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
                 usleep_range(1000, 2000);
         }
 
+        err = mv88e6xxx_read(chip, addr, reg, &data);
+        if (err)
+                return err;
+
+        if ((data & mask) == val)
+                return 0;
+
         dev_err(chip->dev, "Timeout while waiting for switch\n");
         return -ETIMEDOUT;
 }
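The mv88e6xxx hunk fixes a classic polling race: usleep_range() may oversleep, so the loop can exhaust its iteration budget without sampling the register again, and a condition that became true during the final sleep was reported as -ETIMEDOUT. The fix reads the register one last time before declaring a timeout. A minimal standalone sketch of the pattern, with hypothetical helper names rather than the driver's API:

#include <errno.h>
#include <stdint.h>

/* Poll for (reg & mask) == want, but re-check the condition once more
 * after the loop: the last sleep may have carried us past the deadline
 * while the hardware quietly finished.
 */
static int poll_until(int (*read_reg)(uint16_t *val), uint16_t mask,
                      uint16_t want, int max_tries)
{
        uint16_t val;
        int i, err;

        for (i = 0; i < max_tries; i++) {
                err = read_reg(&val);
                if (err)
                        return err;
                if ((val & mask) == want)
                        return 0;
                /* sleep between polls here; may oversleep under load */
        }

        /* final check before timing out */
        err = read_reg(&val);
        if (err)
                return err;
        if ((val & mask) == want)
                return 0;

        return -ETIMEDOUT;
}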
@@ -1002,6 +1002,8 @@ static const struct regmap_config ar9331_mdio_regmap_config = {
         .val_bits = 32,
         .reg_stride = 4,
         .max_register = AR9331_SW_REG_PAGE,
+        .use_single_read = true,
+        .use_single_write = true,
 
         .ranges = ar9331_regmap_range,
         .num_ranges = ARRAY_SIZE(ar9331_regmap_range),
@@ -1018,8 +1020,6 @@ static struct regmap_bus ar9331_sw_bus = {
         .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
         .read = ar9331_mdio_read,
         .write = ar9331_sw_bus_write,
-        .max_raw_read = 4,
-        .max_raw_write = 4,
 };
 
 static int ar9331_sw_probe(struct mdio_device *mdiodev)
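On the regmap flags: use_single_read and use_single_write tell the regmap core to split every bulk access into a sequence of single-register operations, which matches what this MDIO-backed bus can actually do; with accesses forced to single registers, the max_raw_read/max_raw_write = 4 caps on raw transfers become redundant and are dropped.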
@@ -512,11 +512,6 @@ bnad_debugfs_init(struct bnad *bnad)
         if (!bnad->port_debugfs_root) {
                 bnad->port_debugfs_root =
                         debugfs_create_dir(name, bna_debugfs_root);
-                if (!bnad->port_debugfs_root) {
-                        netdev_warn(bnad->netdev,
-                                    "debugfs root dir creation failed\n");
-                        return;
-                }
 
                 atomic_inc(&bna_debugfs_port_count);
 
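The deleted check was dead code on two counts: debugfs_create_dir() never returns NULL on failure (it returns an ERR_PTR, so the ! test could not fire), and the debugfs API is deliberately designed so that callers need not check its return value at all.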
@@ -255,8 +255,10 @@ struct iavf_adapter {
         struct workqueue_struct *wq;
         struct work_struct reset_task;
         struct work_struct adminq_task;
+        struct work_struct finish_config;
         struct delayed_work client_task;
         wait_queue_head_t down_waitqueue;
+        wait_queue_head_t reset_waitqueue;
         wait_queue_head_t vc_waitqueue;
         struct iavf_q_vector *q_vectors;
         struct list_head vlan_filter_list;
@@ -518,8 +520,9 @@ int iavf_up(struct iavf_adapter *adapter);
 void iavf_down(struct iavf_adapter *adapter);
 int iavf_process_config(struct iavf_adapter *adapter);
 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
-void iavf_schedule_reset(struct iavf_adapter *adapter);
+void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
 void iavf_schedule_request_stats(struct iavf_adapter *adapter);
+void iavf_schedule_finish_config(struct iavf_adapter *adapter);
 void iavf_reset(struct iavf_adapter *adapter);
 void iavf_set_ethtool_ops(struct net_device *netdev);
 void iavf_update_stats(struct iavf_adapter *adapter);
@@ -582,4 +585,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                                         const u8 *macaddr);
+int iavf_wait_for_reset(struct iavf_adapter *adapter);
 #endif /* _IAVF_H_ */
@@ -484,6 +484,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
 {
         struct iavf_adapter *adapter = netdev_priv(netdev);
         u32 orig_flags, new_flags, changed_flags;
+        int ret = 0;
         u32 i;
 
         orig_flags = READ_ONCE(adapter->flags);
@@ -531,12 +532,14 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
         /* issue a reset to force legacy-rx change to take effect */
         if (changed_flags & IAVF_FLAG_LEGACY_RX) {
                 if (netif_running(netdev)) {
-                        adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-                        queue_work(adapter->wq, &adapter->reset_task);
+                        iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+                        ret = iavf_wait_for_reset(adapter);
+                        if (ret)
+                                netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
                 }
         }
 
-        return 0;
+        return ret;
 }
 
 /**
@@ -627,6 +630,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
 {
         struct iavf_adapter *adapter = netdev_priv(netdev);
         u32 new_rx_count, new_tx_count;
+        int ret = 0;
 
         if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                 return -EINVAL;
@@ -671,11 +675,13 @@ static int iavf_set_ringparam(struct net_device *netdev,
         }
 
         if (netif_running(netdev)) {
-                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-                queue_work(adapter->wq, &adapter->reset_task);
+                iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+                ret = iavf_wait_for_reset(adapter);
+                if (ret)
+                        netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
         }
 
-        return 0;
+        return ret;
 }
 
 /**
@@ -1830,7 +1836,7 @@ static int iavf_set_channels(struct net_device *netdev,
 {
         struct iavf_adapter *adapter = netdev_priv(netdev);
         u32 num_req = ch->combined_count;
-        int i;
+        int ret = 0;
 
         if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
             adapter->num_tc) {
@@ -1852,22 +1858,13 @@ static int iavf_set_channels(struct net_device *netdev,
 
         adapter->num_req_queues = num_req;
         adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
-        iavf_schedule_reset(adapter);
+        iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
 
-        /* wait for the reset is done */
-        for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
-                msleep(IAVF_RESET_WAIT_MS);
-                if (adapter->flags & IAVF_FLAG_RESET_PENDING)
-                        continue;
-                break;
-        }
-        if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
-                adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
-                adapter->num_active_queues = num_req;
-                return -EOPNOTSUPP;
-        }
+        ret = iavf_wait_for_reset(adapter);
+        if (ret)
+                netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset");
 
-        return 0;
+        return ret;
 }
 
 /**
@@ -166,6 +166,45 @@ static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
         return netdev_priv(pci_get_drvdata(pdev));
 }
 
+/**
+ * iavf_is_reset_in_progress - Check if a reset is in progress
+ * @adapter: board private structure
+ */
+static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
+{
+        if (adapter->state == __IAVF_RESETTING ||
+            adapter->flags & (IAVF_FLAG_RESET_PENDING |
+                              IAVF_FLAG_RESET_NEEDED))
+                return true;
+
+        return false;
+}
+
+/**
+ * iavf_wait_for_reset - Wait for reset to finish.
+ * @adapter: board private structure
+ *
+ * Returns 0 if reset finished successfully, negative on timeout or interrupt.
+ */
+int iavf_wait_for_reset(struct iavf_adapter *adapter)
+{
+        int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
+                                                   !iavf_is_reset_in_progress(adapter),
+                                                   msecs_to_jiffies(5000));
+
+        /* If ret < 0 then it means wait was interrupted.
+         * If ret == 0 then it means we got a timeout while waiting
+         * for reset to finish.
+         * If ret > 0 it means reset has finished.
+         */
+        if (ret > 0)
+                return 0;
+        else if (ret < 0)
+                return -EINTR;
+        else
+                return -EBUSY;
+}
+
 /**
  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw: pointer to the HW structure
@@ -262,12 +301,14 @@ static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
 /**
  * iavf_schedule_reset - Set the flags and schedule a reset event
  * @adapter: board private structure
+ * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
 **/
-void iavf_schedule_reset(struct iavf_adapter *adapter)
+void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
 {
-        if (!(adapter->flags &
-              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
-                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+        if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
+            !(adapter->flags &
+              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
+                adapter->flags |= flags;
                 queue_work(adapter->wq, &adapter->reset_task);
         }
 }
@@ -295,7 +336,7 @@ static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
         struct iavf_adapter *adapter = netdev_priv(netdev);
 
         adapter->tx_timeout_count++;
-        iavf_schedule_reset(adapter);
+        iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
 }
 
 /**
@@ -1651,10 +1692,10 @@ static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
                 adapter->msix_entries[vector].entry = vector;
 
         err = iavf_acquire_msix_vectors(adapter, v_budget);
+        if (!err)
+                iavf_schedule_finish_config(adapter);
 
 out:
-        netif_set_real_num_rx_queues(adapter->netdev, pairs);
-        netif_set_real_num_tx_queues(adapter->netdev, pairs);
         return err;
 }
 
@@ -1828,19 +1869,16 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
 {
         int q_idx, num_q_vectors;
-        int napi_vectors;
 
         if (!adapter->q_vectors)
                 return;
 
         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-        napi_vectors = adapter->num_active_queues;
 
         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
 
-                if (q_idx < napi_vectors)
-                        netif_napi_del(&q_vector->napi);
+                netif_napi_del(&q_vector->napi);
         }
         kfree(adapter->q_vectors);
         adapter->q_vectors = NULL;
@@ -1877,9 +1915,7 @@ static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
                 goto err_alloc_queues;
         }
 
-        rtnl_lock();
         err = iavf_set_interrupt_capability(adapter);
-        rtnl_unlock();
         if (err) {
                 dev_err(&adapter->pdev->dev,
                         "Unable to setup interrupt capabilities\n");
@@ -1932,15 +1968,16 @@ static void iavf_free_rss(struct iavf_adapter *adapter)
 /**
  * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
  * @adapter: board private structure
+ * @running: true if adapter->state == __IAVF_RUNNING
  *
  * Returns 0 on success, negative on failure
 **/
-static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
+static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
 {
         struct net_device *netdev = adapter->netdev;
         int err;
 
-        if (netif_running(netdev))
+        if (running)
                 iavf_free_traffic_irqs(adapter);
         iavf_free_misc_irq(adapter);
         iavf_reset_interrupt_capability(adapter);
@@ -1964,6 +2001,78 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
         return err;
 }
 
+/**
+ * iavf_finish_config - do all netdev work that needs RTNL
+ * @work: our work_struct
+ *
+ * Do work that needs both RTNL and crit_lock.
+ **/
+static void iavf_finish_config(struct work_struct *work)
+{
+        struct iavf_adapter *adapter;
+        int pairs, err;
+
+        adapter = container_of(work, struct iavf_adapter, finish_config);
+
+        /* Always take RTNL first to prevent circular lock dependency */
+        rtnl_lock();
+        mutex_lock(&adapter->crit_lock);
+
+        if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
+            adapter->netdev_registered &&
+            !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
+                netdev_update_features(adapter->netdev);
+                adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+        }
+
+        switch (adapter->state) {
+        case __IAVF_DOWN:
+                if (!adapter->netdev_registered) {
+                        err = register_netdevice(adapter->netdev);
+                        if (err) {
+                                dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
+                                        err);
+
+                                /* go back and try again.*/
+                                iavf_free_rss(adapter);
+                                iavf_free_misc_irq(adapter);
+                                iavf_reset_interrupt_capability(adapter);
+                                iavf_change_state(adapter,
+                                                  __IAVF_INIT_CONFIG_ADAPTER);
+                                goto out;
+                        }
+                        adapter->netdev_registered = true;
+                }
+
+                /* Set the real number of queues when reset occurs while
+                 * state == __IAVF_DOWN
+                 */
+                fallthrough;
+        case __IAVF_RUNNING:
+                pairs = adapter->num_active_queues;
+                netif_set_real_num_rx_queues(adapter->netdev, pairs);
+                netif_set_real_num_tx_queues(adapter->netdev, pairs);
+                break;
+
+        default:
+                break;
+        }
+
+out:
+        mutex_unlock(&adapter->crit_lock);
+        rtnl_unlock();
+}
+
+/**
+ * iavf_schedule_finish_config - Set the flags and schedule a reset event
+ * @adapter: board private structure
+ **/
+void iavf_schedule_finish_config(struct iavf_adapter *adapter)
+{
+        if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+                queue_work(adapter->wq, &adapter->finish_config);
+}
+
 /**
  * iavf_process_aq_command - process aq_required flags
  * and sends aq command
@@ -2371,7 +2480,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
                         adapter->vsi_res->num_queue_pairs);
                 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
                 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
-                iavf_schedule_reset(adapter);
+                iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
 
                 return -EAGAIN;
         }
@@ -2601,22 +2710,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
 
         netif_carrier_off(netdev);
         adapter->link_up = false;
-
-        /* set the semaphore to prevent any callbacks after device registration
-         * up to time when state of driver will be set to __IAVF_DOWN
-         */
-        rtnl_lock();
-        if (!adapter->netdev_registered) {
-                err = register_netdevice(netdev);
-                if (err) {
-                        rtnl_unlock();
-                        goto err_register;
-                }
-        }
-
-        adapter->netdev_registered = true;
-
         netif_tx_stop_all_queues(netdev);
 
         if (CLIENT_ALLOWED(adapter)) {
                 err = iavf_lan_add_device(adapter);
                 if (err)
@@ -2629,7 +2724,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
 
         iavf_change_state(adapter, __IAVF_DOWN);
         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
-        rtnl_unlock();
 
         iavf_misc_irq_enable(adapter);
         wake_up(&adapter->down_waitqueue);
@@ -2649,10 +2743,11 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
         /* request initial VLAN offload settings */
         iavf_set_vlan_offload_features(adapter, 0, netdev->features);
 
+        iavf_schedule_finish_config(adapter);
         return;
 
 err_mem:
         iavf_free_rss(adapter);
-err_register:
         iavf_free_misc_irq(adapter);
 err_sw_init:
         iavf_reset_interrupt_capability(adapter);
@@ -2679,26 +2774,9 @@ static void iavf_watchdog_task(struct work_struct *work)
                 goto restart_watchdog;
         }
 
-        if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
-            adapter->netdev_registered &&
-            !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
-            rtnl_trylock()) {
-                netdev_update_features(adapter->netdev);
-                rtnl_unlock();
-                adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
-        }
-
         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
                 iavf_change_state(adapter, __IAVF_COMM_FAILED);
 
-        if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
-                adapter->aq_required = 0;
-                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
-                mutex_unlock(&adapter->crit_lock);
-                queue_work(adapter->wq, &adapter->reset_task);
-                return;
-        }
-
         switch (adapter->state) {
         case __IAVF_STARTUP:
                 iavf_startup(adapter);
@@ -2826,11 +2904,10 @@ static void iavf_watchdog_task(struct work_struct *work)
         /* check for hw reset */
         reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
         if (!reg_val) {
-                adapter->flags |= IAVF_FLAG_RESET_PENDING;
                 adapter->aq_required = 0;
                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
-                queue_work(adapter->wq, &adapter->reset_task);
+                iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
                 mutex_unlock(&adapter->crit_lock);
                 queue_delayed_work(adapter->wq,
                                    &adapter->watchdog_task, HZ * 2);
@@ -2940,11 +3017,6 @@ static void iavf_reset_task(struct work_struct *work)
         int i = 0, err;
         bool running;
 
-        /* Detach interface to avoid subsequent NDO callbacks */
-        rtnl_lock();
-        netif_device_detach(netdev);
-        rtnl_unlock();
-
         /* When device is being removed it doesn't make sense to run the reset
          * task, just return in such a case.
         */
@@ -2952,7 +3024,7 @@ static void iavf_reset_task(struct work_struct *work)
                 if (adapter->state != __IAVF_REMOVE)
                         queue_work(adapter->wq, &adapter->reset_task);
 
-                goto reset_finish;
+                return;
         }
 
         while (!mutex_trylock(&adapter->client_lock))
@@ -3010,11 +3082,6 @@ static void iavf_reset_task(struct work_struct *work)
                 iavf_disable_vf(adapter);
                 mutex_unlock(&adapter->client_lock);
                 mutex_unlock(&adapter->crit_lock);
-                if (netif_running(netdev)) {
-                        rtnl_lock();
-                        dev_close(netdev);
-                        rtnl_unlock();
-                }
                 return; /* Do not attempt to reinit. It's dead, Jim. */
         }
 
@@ -3056,7 +3123,7 @@ static void iavf_reset_task(struct work_struct *work)
 
         if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
             (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
-                err = iavf_reinit_interrupt_scheme(adapter);
+                err = iavf_reinit_interrupt_scheme(adapter, running);
                 if (err)
                         goto reset_err;
         }
@@ -3151,10 +3218,11 @@ static void iavf_reset_task(struct work_struct *work)
 
         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
 
+        wake_up(&adapter->reset_waitqueue);
         mutex_unlock(&adapter->client_lock);
         mutex_unlock(&adapter->crit_lock);
 
-        goto reset_finish;
+        return;
 reset_err:
         if (running) {
                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -3164,21 +3232,7 @@ static void iavf_reset_task(struct work_struct *work)
 
         mutex_unlock(&adapter->client_lock);
         mutex_unlock(&adapter->crit_lock);
-
-        if (netif_running(netdev)) {
-                /* Close device to ensure that Tx queues will not be started
-                 * during netif_device_attach() at the end of the reset task.
-                 */
-                rtnl_lock();
-                dev_close(netdev);
-                rtnl_unlock();
-        }
-
         dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
-reset_finish:
-        rtnl_lock();
-        netif_device_attach(netdev);
-        rtnl_unlock();
 }
 
 /**
@@ -3227,9 +3281,7 @@ static void iavf_adminq_task(struct work_struct *work)
         } while (pending);
         mutex_unlock(&adapter->crit_lock);
 
-        if ((adapter->flags &
-             (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
-            adapter->state == __IAVF_RESETTING)
+        if (iavf_is_reset_in_progress(adapter))
                 goto freedom;
 
         /* check for error indications */
@@ -4315,6 +4367,7 @@ static int iavf_close(struct net_device *netdev)
 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
 {
         struct iavf_adapter *adapter = netdev_priv(netdev);
+        int ret = 0;
 
         netdev_dbg(netdev, "changing MTU from %d to %d\n",
                    netdev->mtu, new_mtu);
@@ -4325,11 +4378,15 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
         }
 
         if (netif_running(netdev)) {
-                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-                queue_work(adapter->wq, &adapter->reset_task);
+                iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+                ret = iavf_wait_for_reset(adapter);
+                if (ret < 0)
+                        netdev_warn(netdev, "MTU change interrupted waiting for reset");
+                else if (ret)
+                        netdev_warn(netdev, "MTU change timed out waiting for reset");
         }
 
-        return 0;
+        return ret;
 }
 
 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
@@ -4922,6 +4979,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         INIT_WORK(&adapter->reset_task, iavf_reset_task);
         INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
+        INIT_WORK(&adapter->finish_config, iavf_finish_config);
         INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
         INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
         queue_delayed_work(adapter->wq, &adapter->watchdog_task,
@@ -4930,6 +4988,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         /* Setup the wait queue for indicating transition to down status */
         init_waitqueue_head(&adapter->down_waitqueue);
 
+        /* Setup the wait queue for indicating transition to running state */
+        init_waitqueue_head(&adapter->reset_waitqueue);
+
         /* Setup the wait queue for indicating virtchannel events */
         init_waitqueue_head(&adapter->vc_waitqueue);
 
@@ -5061,13 +5122,15 @@ static void iavf_remove(struct pci_dev *pdev)
                 usleep_range(500, 1000);
         }
         cancel_delayed_work_sync(&adapter->watchdog_task);
+        cancel_work_sync(&adapter->finish_config);
 
+        rtnl_lock();
         if (adapter->netdev_registered) {
-                rtnl_lock();
                 unregister_netdevice(netdev);
                 adapter->netdev_registered = false;
-                rtnl_unlock();
         }
+        rtnl_unlock();
 
         if (CLIENT_ALLOWED(adapter)) {
                 err = iavf_lan_del_device(adapter);
                 if (err)
@@ -1961,9 +1961,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 	case VIRTCHNL_EVENT_RESET_IMPENDING:
 		dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
 		if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
-			adapter->flags |= IAVF_FLAG_RESET_PENDING;
 			dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
-			queue_work(adapter->wq, &adapter->reset_task);
+			iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
 		}
 		break;
 	default:
@@ -2237,6 +2236,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 
 		iavf_process_config(adapter);
 		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+		iavf_schedule_finish_config(adapter);
 
 		iavf_set_queue_vlan_tag_loc(adapter);
 
@@ -2285,6 +2285,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 	case VIRTCHNL_OP_ENABLE_QUEUES:
 		/* enable transmits */
 		iavf_irq_enable(adapter, true);
+		wake_up(&adapter->reset_waitqueue);
 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
 		break;
 	case VIRTCHNL_OP_DISABLE_QUEUES:
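
The iavf hunks above replace open-coded reset scheduling with iavf_schedule_reset() plus a blocking iavf_wait_for_reset(), woken from the VIRTCHNL_OP_ENABLE_QUEUES completion via the new reset_waitqueue. A minimal sketch of how such a wait is typically built; iavf_reset_done_sketch() is a hypothetical predicate and the 5 s timeout is an assumption, with the return mapping chosen to match the iavf_change_mtu() branches above:

    /* Sketch only: predicate and timeout are assumptions, not driver code.
     * Negative = interrupted by a signal, positive = timed out, 0 = done.
     */
    static int iavf_wait_for_reset_sketch(struct iavf_adapter *adapter)
    {
            long ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
                                                        iavf_reset_done_sketch(adapter),
                                                        msecs_to_jiffies(5000));

            if (ret < 0)
                    return ret;    /* -ERESTARTSYS: a signal woke us early */
            if (!ret)
                    return EBUSY;  /* timed out, reported as a positive value */
            return 0;              /* woken by wake_up(&adapter->reset_waitqueue) */
    }
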
@@ -800,6 +800,8 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
 
 	ice_for_each_q_vector(vsi, v_idx)
 		ice_free_q_vector(vsi, v_idx);
+
+	vsi->num_q_vectors = 0;
 }
 
 /**
@@ -2681,8 +2681,13 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
 
 	ring->rx_max_pending = ICE_MAX_NUM_DESC;
 	ring->tx_max_pending = ICE_MAX_NUM_DESC;
-	ring->rx_pending = vsi->rx_rings[0]->count;
-	ring->tx_pending = vsi->tx_rings[0]->count;
+	if (vsi->tx_rings && vsi->rx_rings) {
+		ring->rx_pending = vsi->rx_rings[0]->count;
+		ring->tx_pending = vsi->tx_rings[0]->count;
+	} else {
+		ring->rx_pending = 0;
+		ring->tx_pending = 0;
+	}
 
 	/* Rx mini and jumbo rings are not supported */
 	ring->rx_mini_max_pending = 0;
@@ -2716,6 +2721,10 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
 		return -EINVAL;
 	}
 
+	/* Return if there is no rings (device is reloading) */
+	if (!vsi->tx_rings || !vsi->rx_rings)
+		return -EBUSY;
+
 	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
 	if (new_tx_cnt != ring->tx_pending)
 		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
@@ -2972,39 +2972,12 @@ int ice_vsi_release(struct ice_vsi *vsi)
 		return -ENODEV;
 	pf = vsi->back;
 
-	/* do not unregister while driver is in the reset recovery pending
-	 * state. Since reset/rebuild happens through PF service task workqueue,
-	 * it's not a good idea to unregister netdev that is associated to the
-	 * PF that is running the work queue items currently. This is done to
-	 * avoid check_flush_dependency() warning on this wq
-	 */
-	if (vsi->netdev && !ice_is_reset_in_progress(pf->state) &&
-	    (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))) {
-		unregister_netdev(vsi->netdev);
-		clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
-	}
-
-	if (vsi->type == ICE_VSI_PF)
-		ice_devlink_destroy_pf_port(pf);
-
 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 		ice_rss_clean(vsi);
 
 	ice_vsi_close(vsi);
 	ice_vsi_decfg(vsi);
 
-	if (vsi->netdev) {
-		if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
-			unregister_netdev(vsi->netdev);
-			clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
-		}
-		if (test_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state)) {
-			free_netdev(vsi->netdev);
-			vsi->netdev = NULL;
-			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
-		}
-	}
-
 	/* retain SW VSI data structure since it is needed to unregister and
 	 * free VSI netdev when PF is not in reset recovery pending state,\
 	 * for ex: during rmmod.
@@ -4430,9 +4430,9 @@ static int ice_start_eth(struct ice_vsi *vsi)
 	if (err)
 		return err;
 
-	rtnl_lock();
 	err = ice_vsi_open(vsi);
-	rtnl_unlock();
+	if (err)
+		ice_fltr_remove_all(vsi);
 
 	return err;
 }
@@ -4895,6 +4895,7 @@ int ice_load(struct ice_pf *pf)
 	params = ice_vsi_to_params(vsi);
 	params.flags = ICE_VSI_FLAG_INIT;
 
+	rtnl_lock();
 	err = ice_vsi_cfg(vsi, &params);
 	if (err)
 		goto err_vsi_cfg;
@@ -4902,6 +4903,7 @@ int ice_load(struct ice_pf *pf)
 	err = ice_start_eth(ice_get_main_vsi(pf));
 	if (err)
 		goto err_start_eth;
+	rtnl_unlock();
 
 	err = ice_init_rdma(pf);
 	if (err)
@@ -4916,9 +4918,11 @@ int ice_load(struct ice_pf *pf)
 
 err_init_rdma:
 	ice_vsi_close(ice_get_main_vsi(pf));
+	rtnl_lock();
err_start_eth:
 	ice_vsi_decfg(ice_get_main_vsi(pf));
err_vsi_cfg:
+	rtnl_unlock();
 	ice_deinit_dev(pf);
 	return err;
 }
@@ -4931,8 +4935,10 @@ void ice_unload(struct ice_pf *pf)
 {
 	ice_deinit_features(pf);
 	ice_deinit_rdma(pf);
+	rtnl_lock();
 	ice_stop_eth(ice_get_main_vsi(pf));
 	ice_vsi_decfg(ice_get_main_vsi(pf));
+	rtnl_unlock();
 	ice_deinit_dev(pf);
 }
 
@@ -2828,9 +2828,8 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
 	struct netdev_queue *nq = txring_txq(ring);
 	union igc_adv_tx_desc *tx_desc = NULL;
 	int cpu = smp_processor_id();
-	u16 ntu = ring->next_to_use;
 	struct xdp_desc xdp_desc;
-	u16 budget;
+	u16 budget, ntu;
 
 	if (!netif_carrier_ok(ring->netdev))
 		return;
@@ -2840,6 +2839,7 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
 	/* Avoid transmit queue timeout since we share it with the slow path */
 	txq_trans_cond_update(nq);
 
+	ntu = ring->next_to_use;
 	budget = igc_desc_unused(ring);
 
 	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
@@ -4,6 +4,7 @@
  * Copyright (C) 2022 Marvell.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/rtnetlink.h>
 #include <linux/bitfield.h>
 #include "otx2_common.h"
@@ -42,6 +43,56 @@
 #define MCS_TCI_E		0x08 /* encryption */
 #define MCS_TCI_C		0x04 /* changed text */
 
+#define CN10K_MAX_HASH_LEN	16
+#define CN10K_MAX_SAK_LEN	32
+
+static int cn10k_ecb_aes_encrypt(struct otx2_nic *pfvf, u8 *sak,
+				 u16 sak_len, u8 *hash)
+{
+	u8 data[CN10K_MAX_HASH_LEN] = { 0 };
+	struct skcipher_request *req = NULL;
+	struct scatterlist sg_src, sg_dst;
+	struct crypto_skcipher *tfm;
+	DECLARE_CRYPTO_WAIT(wait);
+	int err;
+
+	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+	if (IS_ERR(tfm)) {
+		dev_err(pfvf->dev, "failed to allocate transform for ecb-aes\n");
+		return PTR_ERR(tfm);
+	}
+
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		dev_err(pfvf->dev, "failed to allocate request for skcipher\n");
+		err = -ENOMEM;
+		goto free_tfm;
+	}
+
+	err = crypto_skcipher_setkey(tfm, sak, sak_len);
+	if (err) {
+		dev_err(pfvf->dev, "failed to set key for skcipher\n");
+		goto free_req;
+	}
+
+	/* build sg list */
+	sg_init_one(&sg_src, data, CN10K_MAX_HASH_LEN);
+	sg_init_one(&sg_dst, hash, CN10K_MAX_HASH_LEN);
+
+	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+	skcipher_request_set_crypt(req, &sg_src, &sg_dst,
+				   CN10K_MAX_HASH_LEN, NULL);
+
+	err = crypto_skcipher_encrypt(req);
+	err = crypto_wait_req(err, &wait);
+
+free_req:
+	skcipher_request_free(req);
+free_tfm:
+	crypto_free_skcipher(tfm);
+	return err;
+}
+
 static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
 						 struct macsec_secy *secy)
 {
@@ -330,19 +381,53 @@ static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
 	return ret;
 }
 
+static int cn10k_mcs_write_keys(struct otx2_nic *pfvf,
+				struct macsec_secy *secy,
+				struct mcs_sa_plcy_write_req *req,
+				u8 *sak, u8 *salt, ssci_t ssci)
+{
+	u8 hash_rev[CN10K_MAX_HASH_LEN];
+	u8 sak_rev[CN10K_MAX_SAK_LEN];
+	u8 salt_rev[MACSEC_SALT_LEN];
+	u8 hash[CN10K_MAX_HASH_LEN];
+	u32 ssci_63_32;
+	int err, i;
+
+	err = cn10k_ecb_aes_encrypt(pfvf, sak, secy->key_len, hash);
+	if (err) {
+		dev_err(pfvf->dev, "Generating hash using ECB(AES) failed\n");
+		return err;
+	}
+
+	for (i = 0; i < secy->key_len; i++)
+		sak_rev[i] = sak[secy->key_len - 1 - i];
+
+	for (i = 0; i < CN10K_MAX_HASH_LEN; i++)
+		hash_rev[i] = hash[CN10K_MAX_HASH_LEN - 1 - i];
+
+	for (i = 0; i < MACSEC_SALT_LEN; i++)
+		salt_rev[i] = salt[MACSEC_SALT_LEN - 1 - i];
+
+	ssci_63_32 = (__force u32)cpu_to_be32((__force u32)ssci);
+
+	memcpy(&req->plcy[0][0], sak_rev, secy->key_len);
+	memcpy(&req->plcy[0][4], hash_rev, CN10K_MAX_HASH_LEN);
+	memcpy(&req->plcy[0][6], salt_rev, MACSEC_SALT_LEN);
+	req->plcy[0][7] |= (u64)ssci_63_32 << 32;
+
+	return 0;
+}
+
 static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
 				      struct macsec_secy *secy,
 				      struct cn10k_mcs_rxsc *rxsc,
 				      u8 assoc_num, bool sa_in_use)
 {
-	unsigned char *src = rxsc->sa_key[assoc_num];
 	struct mcs_sa_plcy_write_req *plcy_req;
-	u8 *salt_p = rxsc->salt[assoc_num];
+	u8 *sak = rxsc->sa_key[assoc_num];
+	u8 *salt = rxsc->salt[assoc_num];
 	struct mcs_rx_sc_sa_map *map_req;
 	struct mbox *mbox = &pfvf->mbox;
-	u64 ssci_salt_95_64 = 0;
-	u8 reg, key_len;
-	u64 salt_63_0;
 	int ret;
 
 	mutex_lock(&mbox->lock);
@@ -360,20 +445,10 @@ static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
 		goto fail;
 	}
 
-	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
-		memcpy((u8 *)&plcy_req->plcy[0][reg],
-		       (src + reg * 8), 8);
-		reg++;
-	}
-
-	if (secy->xpn) {
-		memcpy((u8 *)&salt_63_0, salt_p, 8);
-		memcpy((u8 *)&ssci_salt_95_64, salt_p + 8, 4);
-		ssci_salt_95_64 |= (__force u64)rxsc->ssci[assoc_num] << 32;
-
-		plcy_req->plcy[0][6] = salt_63_0;
-		plcy_req->plcy[0][7] = ssci_salt_95_64;
-	}
+	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
+				   salt, rxsc->ssci[assoc_num]);
+	if (ret)
+		goto fail;
 
 	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
 	plcy_req->sa_cnt = 1;
@@ -586,13 +661,10 @@ static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
 				      struct cn10k_mcs_txsc *txsc,
 				      u8 assoc_num)
 {
-	unsigned char *src = txsc->sa_key[assoc_num];
 	struct mcs_sa_plcy_write_req *plcy_req;
-	u8 *salt_p = txsc->salt[assoc_num];
+	u8 *sak = txsc->sa_key[assoc_num];
+	u8 *salt = txsc->salt[assoc_num];
 	struct mbox *mbox = &pfvf->mbox;
-	u64 ssci_salt_95_64 = 0;
-	u8 reg, key_len;
-	u64 salt_63_0;
 	int ret;
 
 	mutex_lock(&mbox->lock);
@@ -603,19 +675,10 @@ static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
 		goto fail;
 	}
 
-	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
-		memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
-		reg++;
-	}
-
-	if (secy->xpn) {
-		memcpy((u8 *)&salt_63_0, salt_p, 8);
-		memcpy((u8 *)&ssci_salt_95_64, salt_p + 8, 4);
-		ssci_salt_95_64 |= (__force u64)txsc->ssci[assoc_num] << 32;
-
-		plcy_req->plcy[0][6] = salt_63_0;
-		plcy_req->plcy[0][7] = ssci_salt_95_64;
-	}
+	ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak,
+				   salt, txsc->ssci[assoc_num]);
+	if (ret)
+		goto fail;
 
 	plcy_req->plcy[0][8] = assoc_num;
 	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
@@ -1454,8 +1454,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
 	if (err)
 		goto err_free_npa_lf;
 
-	/* Enable backpressure */
-	otx2_nix_config_bp(pf, true);
+	/* Enable backpressure for CGX mapped PF/VFs */
+	if (!is_otx2_lbkvf(pf->pdev))
+		otx2_nix_config_bp(pf, true);
 
 	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
 	err = otx2_rq_aura_pool_init(pf);
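
The new cn10k_mcs_write_keys() above loads the SAK, its ECB(AES) hash, and the salt into the policy words in reversed byte order, which is the layout the MCS hardware consumes. A standalone demo of that reversal idiom (plain C, sample values, not driver code):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the sak_rev/hash_rev/salt_rev loops in the hunk above. */
    static void reverse_bytes(uint8_t *dst, const uint8_t *src, int len)
    {
            for (int i = 0; i < len; i++)
                    dst[i] = src[len - 1 - i];
    }

    int main(void)
    {
            uint8_t sak[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
            uint8_t sak_rev[16];

            reverse_bytes(sak_rev, sak, sizeof(sak));
            printf("%02x ... %02x\n", sak_rev[0], sak_rev[15]); /* 0f ... 00 */
            return 0;
    }
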
@@ -3846,23 +3846,6 @@ static int mtk_hw_deinit(struct mtk_eth *eth)
 	return 0;
 }
 
-static int __init mtk_init(struct net_device *dev)
-{
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
-	int ret;
-
-	ret = of_get_ethdev_address(mac->of_node, dev);
-	if (ret) {
-		/* If the mac address is invalid, use random mac address */
-		eth_hw_addr_random(dev);
-		dev_err(eth->dev, "generated random MAC address %pM\n",
-			dev->dev_addr);
-	}
-
-	return 0;
-}
-
 static void mtk_uninit(struct net_device *dev)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
@@ -4278,7 +4261,6 @@ static const struct ethtool_ops mtk_ethtool_ops = {
 };
 
 static const struct net_device_ops mtk_netdev_ops = {
-	.ndo_init		= mtk_init,
 	.ndo_uninit		= mtk_uninit,
 	.ndo_open		= mtk_open,
 	.ndo_stop		= mtk_stop,
@@ -4340,6 +4322,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 	mac->hw = eth;
 	mac->of_node = np;
 
+	err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
+	if (err == -EPROBE_DEFER)
+		return err;
+
+	if (err) {
+		/* If the mac address is invalid, use random mac address */
+		eth_hw_addr_random(eth->netdev[id]);
+		dev_err(eth->dev, "generated random MAC address %pM\n",
+			eth->netdev[id]->dev_addr);
+	}
+
 	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
 	mac->hwlro_ip_cnt = 0;
 
@@ -98,7 +98,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
 
 		acct = mtk_foe_entry_get_mib(ppe, i, NULL);
 
-		type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+		type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
 		seq_printf(m, "%05x %s %7s", i,
 			   mtk_foe_entry_state_str(state),
 			   mtk_foe_pkt_type_str(type));
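
Moving of_get_ethdev_address() from .ndo_init into mtk_add_mac() lets the driver see -EPROBE_DEFER while the probe can still be retried; once .ndo_init runs it is too late to defer. The general shape of the pattern, sketched with a hypothetical helper name:

    /* Sketch: propagate -EPROBE_DEFER from probe-time lookups; only fall
     * back to a random address for a genuinely invalid or absent one.
     */
    static int example_set_mac(struct device_node *np, struct net_device *ndev)
    {
            int err = of_get_ethdev_address(np, ndev);

            if (err == -EPROBE_DEFER)
                    return err;             /* MAC provider not ready: retry probe */
            if (err)
                    eth_hw_addr_random(ndev); /* invalid address: pick a random one */
            return 0;
    }
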
@@ -368,7 +368,8 @@ static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb)
 	if (unlikely(!ndev))
 		return false;
 
-	pskb_trim(skb, skb->len - ETH_FCS_LEN);
+	if (pskb_trim(skb, skb->len - ETH_FCS_LEN))
+		return false;
 
 	skb->dev = ndev;
 	skb->protocol = eth_type_trans(skb, skb->dev);
@@ -1260,8 +1260,11 @@ static int emac_tso_csum(struct emac_adapter *adpt,
 		if (skb->protocol == htons(ETH_P_IP)) {
 			u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data)
 				       + ntohs(ip_hdr(skb)->tot_len);
-			if (skb->len > pkt_len)
-				pskb_trim(skb, pkt_len);
+			if (skb->len > pkt_len) {
+				ret = pskb_trim(skb, pkt_len);
+				if (unlikely(ret))
+					return ret;
+			}
 		}
 
 		hdr_len = skb_tcp_all_headers(skb);
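
Both trims above now check the return value: pskb_trim() is not guaranteed to succeed, because on a cloned skb it must reallocate the header before shrinking and can fail with -ENOMEM. The defensive shape, as a minimal sketch:

    /* Sketch: never assume a trim succeeded on a possibly-cloned skb. */
    static int example_strip_fcs(struct sk_buff *skb)
    {
            if (pskb_trim(skb, skb->len - ETH_FCS_LEN))
                    return -ENOMEM; /* clone reallocation failed; drop the frame */
            return 0;
    }
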
@@ -623,6 +623,7 @@ struct rtl8169_private {
 	int cfg9346_usage_count;
 
 	unsigned supports_gmii:1;
+	unsigned aspm_manageable:1;
 	dma_addr_t counters_phys_addr;
 	struct rtl8169_counters *counters;
 	struct rtl8169_tc_offsets tc_offset;
@@ -2746,7 +2747,15 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
 	if (tp->mac_version < RTL_GIGA_MAC_VER_32)
 		return;
 
-	if (enable) {
+	/* Don't enable ASPM in the chip if OS can't control ASPM */
+	if (enable && tp->aspm_manageable) {
+		/* On these chip versions ASPM can even harm
+		 * bus communication of other PCI devices.
+		 */
+		if (tp->mac_version == RTL_GIGA_MAC_VER_42 ||
+		    tp->mac_version == RTL_GIGA_MAC_VER_43)
+			return;
+
 		rtl_mod_config5(tp, 0, ASPM_en);
 		rtl_mod_config2(tp, 0, ClkReqEn);
 
@@ -4514,10 +4523,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 	}
 
 	if (napi_schedule_prep(&tp->napi)) {
-		rtl_unlock_config_regs(tp);
-		rtl_hw_aspm_clkreq_enable(tp, false);
-		rtl_lock_config_regs(tp);
-
 		rtl_irq_disable(tp);
 		__napi_schedule(&tp->napi);
 	}
@@ -4577,14 +4582,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 
 	work_done = rtl_rx(dev, tp, budget);
 
-	if (work_done < budget && napi_complete_done(napi, work_done)) {
+	if (work_done < budget && napi_complete_done(napi, work_done))
 		rtl_irq_enable(tp);
-
-		rtl_unlock_config_regs(tp);
-		rtl_hw_aspm_clkreq_enable(tp, true);
-		rtl_lock_config_regs(tp);
-	}
 
 	return work_done;
 }
 
@@ -5158,6 +5158,16 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
 	rtl_rar_set(tp, mac_addr);
 }
 
+/* register is set if system vendor successfully tested ASPM 1.2 */
+static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
+{
+	if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
+	    r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
+		return true;
+
+	return false;
+}
+
 static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct rtl8169_private *tp;
@@ -5227,6 +5237,19 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		   xid);
 	tp->mac_version = chipset;
 
+	/* Disable ASPM L1 as that cause random device stop working
+	 * problems as well as full system hangs for some PCIe devices users.
+	 * Chips from RTL8168h partially have issues with L1.2, but seem
+	 * to work fine with L1 and L1.1.
+	 */
+	if (rtl_aspm_is_safe(tp))
+		rc = 0;
+	else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
+		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+	else
+		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+	tp->aspm_manageable = !rc;
+
 	tp->dash_type = rtl_check_dash(tp);
 
 	tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
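
The probe-time cascade above decides once whether the OS actually owns ASPM control: pci_disable_link_state() fails when the platform retains control, and tp->aspm_manageable records the outcome. Everything else keys off that bit, which is also why the clkreq toggling could be dropped from the interrupt and poll hot paths. Condensed restatement of the control flow, for orientation only:

    /* Same calls as the hunk above, trimmed to the decision itself. */
    if (rtl_aspm_is_safe(tp))                        /* vendor validated ASPM 1.2 */
            rc = 0;
    else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
            rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
    else
            rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
    tp->aspm_manageable = !rc;   /* nonzero rc: platform kept ASPM control */
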
@@ -106,23 +106,37 @@ struct cpsw_ale_dev_id {
 
 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
 {
-	int idx;
+	int idx, idx2;
+	u32 hi_val = 0;
 
 	idx    = start / 32;
+	idx2 = (start + bits - 1) / 32;
+	/* Check if bits to be fetched exceed a word */
+	if (idx != idx2) {
+		idx2 = 2 - idx2; /* flip */
+		hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+	}
 	start -= idx * 32;
 	idx    = 2 - idx; /* flip */
-	return (ale_entry[idx] >> start) & BITMASK(bits);
+	return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
 }
 
 static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
 				      u32 value)
 {
-	int idx;
+	int idx, idx2;
 
 	value &= BITMASK(bits);
 	idx    = start / 32;
+	idx2 = (start + bits - 1) / 32;
+	/* Check if bits to be set exceed a word */
+	if (idx != idx2) {
+		idx2 = 2 - idx2; /* flip */
+		ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
+		ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+	}
 	start -= idx * 32;
 	idx    = 2 - idx; /* flip */
 	ale_entry[idx] &= ~(BITMASK(bits) << start);
 	ale_entry[idx] |= (value << start);
 }
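
An ALE table entry is kept in three u32 words with word 0 holding the most significant bits, hence the `2 - idx` flip; the old helpers silently truncated any field that straddled a 32-bit boundary. A self-contained demo of the fixed cross-word read (standalone C, sample values chosen so an 8-bit field at offset 28 straddles the low and middle words):

    #include <stdio.h>
    #include <stdint.h>

    #define BITMASK(bits)  ((1u << (bits)) - 1)

    /* Same logic as the patched cpsw_ale_get_field(). */
    static int ale_get_field(uint32_t *ale_entry, uint32_t start, uint32_t bits)
    {
            int idx, idx2;
            uint32_t hi_val = 0;

            idx = start / 32;
            idx2 = (start + bits - 1) / 32;
            if (idx != idx2) {              /* field crosses a word boundary */
                    idx2 = 2 - idx2;        /* flip: word 0 is most significant */
                    hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
            }
            start -= idx * 32;
            idx = 2 - idx;
            return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
    }

    int main(void)
    {
            /* bits 28..35 of the entry: 0xb in the low word, 0xa in the middle */
            uint32_t entry[3] = { 0x0, 0x0000000a, 0xb0000000 };

            printf("0x%x\n", ale_get_field(entry, 28, 8)); /* prints 0xab */
            return 0;
    }
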
@@ -1511,7 +1511,6 @@ static void wx_configure_rx(struct wx *wx)
 	psrtype = WX_RDB_PL_CFG_L4HDR |
 		  WX_RDB_PL_CFG_L3HDR |
 		  WX_RDB_PL_CFG_L2HDR |
-		  WX_RDB_PL_CFG_TUN_TUNHDR |
 		  WX_RDB_PL_CFG_TUN_TUNHDR;
 	wr32(wx, WX_RDB_PL_CFG(0), psrtype);
 
@@ -3451,23 +3451,30 @@ static int __init phy_init(void)
 {
 	int rc;
 
+	ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops);
+
 	rc = mdio_bus_init();
 	if (rc)
-		return rc;
+		goto err_ethtool_phy_ops;
 
-	ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops);
 	features_init();
 
 	rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
 	if (rc)
-		goto err_c45;
+		goto err_mdio_bus;
 
 	rc = phy_driver_register(&genphy_driver, THIS_MODULE);
-	if (rc) {
-		phy_driver_unregister(&genphy_c45_driver);
+	if (rc)
+		goto err_c45;
+
+	return 0;
+
err_c45:
-		mdio_bus_exit();
-	}
+	phy_driver_unregister(&genphy_c45_driver);
+err_mdio_bus:
+	mdio_bus_exit();
+err_ethtool_phy_ops:
+	ethtool_set_ethtool_phy_ops(NULL);
 
 	return rc;
 }
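
The reworked phy_init() above is a textbook goto-unwind ladder: every step that can fail jumps to a label that undoes exactly the steps already completed, in reverse order, so no state leaks on any failure path. The skeleton of the idiom, reduced to standalone C with stand-in init/exit pairs (all names hypothetical):

    #include <stdio.h>

    static int  init_a(void) { puts("init a");  return 0; }
    static void exit_a(void) { puts("exit a"); }
    static int  init_b(void) { puts("init b");  return 0; }
    static void exit_b(void) { puts("exit b"); }
    static int  init_c(void) { puts("init c");  return -1; } /* simulated failure */

    int main(void)
    {
            int rc;

            rc = init_a();
            if (rc)
                    goto err_none;
            rc = init_b();
            if (rc)
                    goto err_a;
            rc = init_c();
            if (rc)
                    goto err_b;     /* unwinds b, then a, in reverse order */

            return 0;

    err_b:
            exit_b();
    err_a:
            exit_a();
    err_none:
            return rc;
    }
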
@@ -1775,6 +1775,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	} else if (!info->in || !info->out)
 		status = usbnet_get_endpoints (dev, udev);
 	else {
+		u8 ep_addrs[3] = {
+			info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
+		};
+
 		dev->in = usb_rcvbulkpipe (xdev, info->in);
 		dev->out = usb_sndbulkpipe (xdev, info->out);
 		if (!(info->flags & FLAG_NO_SETINT))
@@ -1784,6 +1788,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 		else
 			status = 0;
 
+		if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
+			status = -EINVAL;
 	}
 	if (status >= 0 && dev->status)
 		status = init_status (dev, udev);
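
The usbnet hunk validates that the endpoint numbers a minidriver hard-codes actually exist as bulk endpoints on the probed device instead of trusting them; a buggy or malicious device could otherwise present descriptors that do not match. Hedged sketch of the check (usb_check_bulk_endpoints() takes a zero-terminated array of endpoint addresses; the wrapper name is illustrative):

    /* Sketch: reject an interface whose descriptors lack the claimed
     * bulk endpoints (the address array is zero-terminated).
     */
    static int example_validate_eps(struct usb_interface *intf, u8 in, u8 out)
    {
            u8 ep_addrs[3] = { in + USB_DIR_IN, out + USB_DIR_OUT, 0 };

            if (!usb_check_bulk_endpoints(intf, ep_addrs))
                    return -EINVAL;
            return 0;
    }
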
@@ -664,7 +664,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->dev = dev;
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
 	if (unlikely(!neigh))
@@ -672,10 +672,10 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
 	if (!IS_ERR(neigh)) {
 		sock_confirm_neigh(skb, neigh);
 		ret = neigh_output(neigh, skb, false);
-		rcu_read_unlock_bh();
+		rcu_read_unlock();
 		return ret;
 	}
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	IP6_INC_STATS(dev_net(dst->dev),
 		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@ -889,7 +889,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 		}
 	}
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 
 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
 	if (!IS_ERR(neigh)) {
@@ -898,11 +898,11 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 		sock_confirm_neigh(skb, neigh);
 		/* if crossing protocols, can not use the cached header */
 		ret = neigh_output(neigh, skb, is_v6gw);
-		rcu_read_unlock_bh();
+		rcu_read_unlock();
 		return ret;
 	}
 
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 	vrf_tx_error(skb->dev, skb);
 	return -EINVAL;
 }
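
The vrf conversion from rcu_read_lock_bh() to rcu_read_lock() tracks the neighbour tables' move to plain RCU: the read side only has to match the flavor the writers synchronize against, and the plain variant no longer disables BH on the output path. Minimal sketch of the read-side shape (function name illustrative):

    /* Sketch: plain RCU read-side section around a noref neighbour lookup;
     * the neighbour pointer must not be used after rcu_read_unlock().
     */
    static int example_neigh_xmit(struct sk_buff *skb, struct rtable *rt)
    {
            bool is_v6gw = false;
            struct neighbour *neigh;
            int ret = -EINVAL;

            rcu_read_lock();
            neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
            if (!IS_ERR(neigh))
                    ret = neigh_output(neigh, skb, is_v6gw);
            rcu_read_unlock();

            return ret;
    }
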
@@ -513,7 +513,7 @@ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
 
-	queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
+	WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
 }
 
 static inline void tcp_move_syn(struct tcp_sock *tp,
@@ -593,9 +593,7 @@ struct hci_dev {
 	const char		*fw_info;
 	struct dentry		*debugfs;
 
-#ifdef CONFIG_DEV_COREDUMP
 	struct hci_devcoredump	dump;
-#endif
 
 	struct device		dev;
 
@@ -822,6 +820,7 @@ struct hci_conn_params {
 
 	struct hci_conn *conn;
 	bool explicit_connect;
+	/* Accessed without hdev->lock: */
 	hci_conn_flags_t flags;
 	u8  privacy_mode;
 };
@@ -1573,7 +1572,11 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 					    bdaddr_t *addr, u8 addr_type);
 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
 void hci_conn_params_clear_disabled(struct hci_dev *hdev);
+void hci_conn_params_free(struct hci_conn_params *param);
 
+void hci_pend_le_list_del_init(struct hci_conn_params *param);
+void hci_pend_le_list_add(struct hci_conn_params *param,
+			  struct list_head *list);
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 						  bdaddr_t *addr,
 						  u8 addr_type);
@@ -277,7 +277,7 @@ struct bond_vlan_tag {
 	unsigned short	vlan_id;
 };
 
-/**
+/*
  * Returns NULL if the net_device does not belong to any of the bond's slaves
  *
  * Caller must hold bond lock for read
@@ -170,7 +170,8 @@ wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
 }
 
 /**
- * @WPAN_PHY_FLAG_TRANSMIT_POWER: Indicates that transceiver will support
+ * enum wpan_phy_flags - WPAN PHY state flags
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that transceiver will support
  *	transmit power setting.
  * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed
  *	level setting.
@@ -145,8 +145,8 @@ struct codel_vars {
  * @maxpacket:	largest packet we've seen so far
  * @drop_count:	temp count of dropped packets in dequeue()
 * @drop_len:	bytes of dropped packets in dequeue()
- * ecn_mark:	number of packets we ECN marked instead of dropping
- * ce_mark:	number of packets CE marked because sojourn time was above ce_threshold
+ * @ecn_mark:	number of packets we ECN marked instead of dropping
+ * @ce_mark:	number of packets CE marked because sojourn time was above ce_threshold
 */
 struct codel_stats {
 	u32		maxpacket;
@@ -221,7 +221,7 @@ struct devlink_dpipe_field {
 /**
  * struct devlink_dpipe_header - dpipe header object
  * @name: header name
- * @id: index, global/local detrmined by global bit
+ * @id: index, global/local determined by global bit
  * @fields: fields
  * @fields_count: number of fields
 * @global: indicates if header is shared like most protocol header
@@ -241,7 +241,7 @@ struct devlink_dpipe_header {
  * @header_index: header index (packets can have several headers of same
  *		  type like in case of tunnels)
 * @header: header
- * @fieled_id: field index
+ * @field_id: field index
 */
 struct devlink_dpipe_match {
 	enum devlink_dpipe_match_type type;
@@ -256,7 +256,7 @@ struct devlink_dpipe_match {
  * @header_index: header index (packets can have several headers of same
  *		  type like in case of tunnels)
 * @header: header
- * @fieled_id: field index
+ * @field_id: field index
 */
 struct devlink_dpipe_action {
 	enum devlink_dpipe_action_type type;
@@ -292,7 +292,7 @@ struct devlink_dpipe_value {
 * struct devlink_dpipe_entry - table entry object
 * @index: index of the entry in the table
 * @match_values: match values
- * @matche_values_count: count of matches tuples
+ * @match_values_count: count of matches tuples
 * @action_values: actions values
 * @action_values_count: count of actions values
 * @counter: value of counter
@@ -342,7 +342,9 @@ struct devlink_dpipe_table_ops;
 */
 struct devlink_dpipe_table {
 	void *priv;
+	/* private: */
 	struct list_head list;
+	/* public: */
 	const char *name;
 	bool counters_enabled;
 	bool counter_control_extern;
@@ -355,13 +357,13 @@ struct devlink_dpipe_table {
 
 /**
 * struct devlink_dpipe_table_ops - dpipe_table ops
- * @actions_dump - dumps all tables actions
- * @matches_dump - dumps all tables matches
- * @entries_dump - dumps all active entries in the table
- * @counters_set_update - when changing the counter status hardware sync
+ * @actions_dump: dumps all tables actions
+ * @matches_dump: dumps all tables matches
+ * @entries_dump: dumps all active entries in the table
+ * @counters_set_update: when changing the counter status hardware sync
 *			  maybe needed to allocate/free counter related
 *			  resources
- * @size_get - get size
+ * @size_get: get size
 */
 struct devlink_dpipe_table_ops {
 	int (*actions_dump)(void *priv, struct sk_buff *skb);
@@ -374,8 +376,8 @@ struct devlink_dpipe_table_ops {
 
 /**
 * struct devlink_dpipe_headers - dpipe headers
- * @headers - header array can be shared (global bit) or driver specific
- * @headers_count - count of headers
+ * @headers: header array can be shared (global bit) or driver specific
+ * @headers_count: count of headers
 */
 struct devlink_dpipe_headers {
 	struct devlink_dpipe_header **headers;
@@ -387,7 +389,7 @@ struct devlink_dpipe_headers {
 * @size_min: minimum size which can be set
 * @size_max: maximum size which can be set
 * @size_granularity: size granularity
- * @size_unit: resource's basic unit
+ * @unit: resource's basic unit
 */
 struct devlink_resource_size_params {
 	u64 size_min;
@@ -457,6 +459,7 @@ struct devlink_flash_notify {
 
 /**
 * struct devlink_param - devlink configuration parameter data
+ * @id: devlink parameter id number
 * @name: name of the parameter
 * @generic: indicates if the parameter is generic or driver specific
 * @type: parameter type
@@ -632,6 +635,7 @@ enum devlink_param_generic_id {
 * struct devlink_flash_update_params - Flash Update parameters
 * @fw: pointer to the firmware data to update from
 * @component: the flash component to update
+ * @overwrite_mask: which types of flash update are supported (may be %0)
 *
 * With the exception of fw, drivers must opt-in to parameters by
 * setting the appropriate bit in the supported_flash_update_params field in
@@ -29,7 +29,7 @@ struct fqdir {
 };
 
 /**
- * fragment queue flags
+ * enum: fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
@@ -111,7 +111,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
 int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
 struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
-				    struct llc_addr *laddr);
+				    struct llc_addr *laddr, const struct net *net);
 void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
 void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
 
@@ -269,7 +269,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
 /**
 * llc_pdu_decode_da - extracts dest address of input frame
 * @skb: input skb that destination address must be extracted from it
- * @sa: pointer to destination address (6 byte array).
+ * @da: pointer to destination address (6 byte array).
 *
 * This function extracts destination address(MAC) of input frame.
 */
@@ -321,7 +321,7 @@ static inline void llc_pdu_init_as_ui_cmd(struct sk_buff *skb)
 
 /**
 * llc_pdu_init_as_test_cmd - sets PDU as TEST
- * @skb - Address of the skb to build
+ * @skb: Address of the skb to build
 *
 * Sets a PDU as TEST
 */
@@ -369,6 +369,8 @@ struct llc_xid_info {
 /**
 * llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
 * @skb: input skb that header must be set into it.
+ * @svcs_supported: The class of the LLC (I or II)
+ * @rx_window: The size of the receive window of the LLC
 *
 * This function sets third,fourth,fifth and sixth bytes of LLC header as
 * a XID PDU.
@@ -192,7 +192,7 @@
 
 /**
 * struct nsh_md1_ctx - Keeps track of NSH context data
- * @nshc<1-4>: NSH Contexts.
+ * @context: NSH Contexts.
 */
 struct nsh_md1_ctx {
 	__be32 context[4];
@@ -17,7 +17,7 @@
 /**
 * struct pie_params - contains pie parameters
 * @target: target delay in pschedtime
- * @tudpate: interval at which drop probability is calculated
+ * @tupdate: interval at which drop probability is calculated
 * @limit: total number of packets that can be in the queue
 * @alpha: parameter to control drop probability
 * @beta: parameter to control drop probability
@@ -1,4 +1,4 @@
-/**
+/*
 * Copyright (c) 2017 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
@@ -1509,25 +1509,38 @@ void tcp_leave_memory_pressure(struct sock *sk);
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
+	int val;
 
-	return tp->keepalive_intvl ? :
-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
+	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
+	 * and do_tcp_setsockopt().
+	 */
+	val = READ_ONCE(tp->keepalive_intvl);
+
+	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
 }
 
 static inline int keepalive_time_when(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
+	int val;
 
-	return tp->keepalive_time ? :
-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
+	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
+	val = READ_ONCE(tp->keepalive_time);
+
+	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
 }
 
 static inline int keepalive_probes(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
+	int val;
 
-	return tp->keepalive_probes ? :
-		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
+	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
+	 * and do_tcp_setsockopt().
+	 */
+	val = READ_ONCE(tp->keepalive_probes);
+
+	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
 }
 
 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
@@ -2048,7 +2061,11 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
 	struct net *net = sock_net((struct sock *)tp);
-	return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+	u32 val;
+
+	val = READ_ONCE(tp->notsent_lowat);
+
+	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
 }
 
 bool tcp_stream_memory_free(const struct sock *sk, int wake);
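
All of the tcp annotations in this batch follow one rule: a field written without the socket lock held, or read without it, gets a WRITE_ONCE()/READ_ONCE() pair on the same location so the compiler can neither tear the access nor re-load it. The pattern in miniature, hedged as a sketch (the wrapper names are illustrative; keepalive_intvl is one of the fields actually annotated above):

    /* Sketch: writer and lockless reader must pair WRITE_ONCE() with
     * READ_ONCE() on the same field.
     */
    static void example_set_intvl(struct tcp_sock *tp, int val)
    {
            WRITE_ONCE(tp->keepalive_intvl, val);  /* paired with reader below */
    }

    static int example_get_intvl(const struct tcp_sock *tp)
    {
            return READ_ONCE(tp->keepalive_intvl); /* safe without the socket lock */
    }
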
@@ -5573,16 +5573,17 @@ static int update_stack_depth(struct bpf_verifier_env *env,
 * Since recursion is prevented by check_cfg() this algorithm
 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
 */
-static int check_max_stack_depth(struct bpf_verifier_env *env)
+static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
 {
-	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
 	struct bpf_subprog_info *subprog = env->subprog_info;
 	struct bpf_insn *insn = env->prog->insnsi;
+	int depth = 0, frame = 0, i, subprog_end;
 	bool tail_call_reachable = false;
 	int ret_insn[MAX_CALL_FRAMES];
 	int ret_prog[MAX_CALL_FRAMES];
 	int j;
 
+	i = subprog[idx].start;
process_func:
 	/* protect against potential stack overflow that might happen when
 	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
@@ -5621,7 +5622,7 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
continue_func:
 	subprog_end = subprog[idx + 1].start;
 	for (; i < subprog_end; i++) {
-		int next_insn;
+		int next_insn, sidx;
 
 		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
 			continue;
@@ -5631,14 +5632,14 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
 
 		/* find the callee */
 		next_insn = i + insn[i].imm + 1;
-		idx = find_subprog(env, next_insn);
-		if (idx < 0) {
+		sidx = find_subprog(env, next_insn);
+		if (sidx < 0) {
 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
 				  next_insn);
 			return -EFAULT;
 		}
-		if (subprog[idx].is_async_cb) {
-			if (subprog[idx].has_tail_call) {
+		if (subprog[sidx].is_async_cb) {
+			if (subprog[sidx].has_tail_call) {
 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
 				return -EFAULT;
 			}
@@ -5647,6 +5648,7 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
 			continue;
 		}
 		i = next_insn;
+		idx = sidx;
 
 		if (subprog[idx].has_tail_call)
 			tail_call_reachable = true;
@@ -5682,6 +5684,22 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
 	goto continue_func;
 }
 
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+	struct bpf_subprog_info *si = env->subprog_info;
+	int ret;
+
+	for (int i = 0; i < env->subprog_cnt; i++) {
+		if (!i || si[i].is_async_cb) {
+			ret = check_max_stack_depth_subprog(env, i);
+			if (ret < 0)
+				return ret;
+		}
+		continue;
+	}
+	return 0;
+}
+
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 static int get_callee_stack_depth(struct bpf_verifier_env *env,
 				  const struct bpf_insn *insn, int idx)
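
The verifier fix splits stack-depth checking so that every async callback subprogram gets its own top-down walk: an async callback runs on a fresh stack, so it must not be charged against (or hidden behind) the stack depth of the program that schedules it. In miniature, the new wrapper simply re-enters the old algorithm once per independent entry point; a sketch of the equivalent loop (note the trailing continue in the merged code above is a no-op):

    /* Sketch: walk only independent entry points, i.e. the main program
     * (subprog 0) and each async callback.
     */
    static int example_check_all(struct bpf_verifier_env *env)
    {
            struct bpf_subprog_info *si = env->subprog_info;
            int ret;

            for (int i = 0; i < env->subprog_cnt; i++) {
                    if (i && !si[i].is_async_cb)
                            continue; /* reached via direct calls from subprog 0 */
                    ret = check_max_stack_depth_subprog(env, i);
                    if (ret < 0)
                            return ret;
            }
            return 0;
    }
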
@@ -118,7 +118,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
 	 */
 	params->explicit_connect = false;
 
-	list_del_init(&params->action);
+	hci_pend_le_list_del_init(params);
 
 	switch (params->auto_connect) {
 	case HCI_AUTO_CONN_EXPLICIT:
@@ -127,10 +127,10 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
 		return;
 	case HCI_AUTO_CONN_DIRECT:
 	case HCI_AUTO_CONN_ALWAYS:
-		list_add(&params->action, &hdev->pend_le_conns);
+		hci_pend_le_list_add(params, &hdev->pend_le_conns);
 		break;
 	case HCI_AUTO_CONN_REPORT:
-		list_add(&params->action, &hdev->pend_le_reports);
+		hci_pend_le_list_add(params, &hdev->pend_le_reports);
 		break;
 	default:
 		break;
@@ -1426,8 +1426,8 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev,
 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
-		list_del_init(&params->action);
-		list_add(&params->action, &hdev->pend_le_conns);
+		hci_pend_le_list_del_init(params);
+		hci_pend_le_list_add(params, &hdev->pend_le_conns);
 	}
 
 	params->explicit_connect = true;
@@ -1684,7 +1684,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
 	if (!link) {
 		hci_conn_drop(acl);
 		hci_conn_drop(sco);
-		return NULL;
+		return ERR_PTR(-ENOLINK);
 	}
 
 	sco->setting = setting;
@@ -2254,7 +2254,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
 	if (!link) {
 		hci_conn_drop(le);
 		hci_conn_drop(cis);
-		return NULL;
+		return ERR_PTR(-ENOLINK);
 	}
 
 	/* If LE is already connected and CIS handle is already set proceed to
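Both hunks above switch the failure path from a bare NULL to the kernel's error-pointer convention, so callers can tell "no link" apart from other failures instead of guessing. A minimal self-contained sketch of that convention, with invented names (the real callers are in the SCO/ISO socket code):

#include <linux/err.h>
#include <linux/errno.h>

struct conn;	/* stand-in type for the sketch */

static struct conn *make_conn(int want_link)
{
	if (!want_link)
		return ERR_PTR(-ENOLINK);	/* encode the errno in the pointer */
	return NULL;	/* pretend a real allocation happens elsewhere */
}

static int caller(void)
{
	struct conn *c = make_conn(0);

	if (IS_ERR(c))
		return PTR_ERR(c);	/* recovers -ENOLINK */
	return 0;
}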
@@ -1972,6 +1972,7 @@ static int hci_remove_adv_monitor(struct hci_dev *hdev,
 				  struct adv_monitor *monitor)
 {
 	int status = 0;
+	int handle;
 
 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
@@ -1980,9 +1981,10 @@ static int hci_remove_adv_monitor(struct hci_dev *hdev,
 		goto free_monitor;
 
 	case HCI_ADV_MONITOR_EXT_MSFT:
+		handle = monitor->handle;
 		status = msft_remove_monitor(hdev, monitor);
 		bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
-			   hdev->name, monitor->handle, status);
+			   hdev->name, handle, status);
 		break;
 	}
 
@@ -2249,21 +2251,45 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
 	return NULL;
 }
 
-/* This function requires the caller holds hdev->lock */
+/* This function requires the caller holds hdev->lock or rcu_read_lock */
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 						  bdaddr_t *addr, u8 addr_type)
 {
 	struct hci_conn_params *param;
 
-	list_for_each_entry(param, list, action) {
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(param, list, action) {
 		if (bacmp(&param->addr, addr) == 0 &&
-		    param->addr_type == addr_type)
+		    param->addr_type == addr_type) {
+			rcu_read_unlock();
 			return param;
+		}
 	}
 
+	rcu_read_unlock();
+
 	return NULL;
 }
 
+/* This function requires the caller holds hdev->lock */
+void hci_pend_le_list_del_init(struct hci_conn_params *param)
+{
+	if (list_empty(&param->action))
+		return;
+
+	list_del_rcu(&param->action);
+	synchronize_rcu();
+	INIT_LIST_HEAD(&param->action);
+}
+
+/* This function requires the caller holds hdev->lock */
+void hci_pend_le_list_add(struct hci_conn_params *param,
+			  struct list_head *list)
+{
+	list_add_rcu(&param->action, list);
+}
+
 /* This function requires the caller holds hdev->lock */
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 					    bdaddr_t *addr, u8 addr_type)
@@ -2297,14 +2323,15 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 	return params;
 }
 
-static void hci_conn_params_free(struct hci_conn_params *params)
+void hci_conn_params_free(struct hci_conn_params *params)
 {
+	hci_pend_le_list_del_init(params);
+
 	if (params->conn) {
 		hci_conn_drop(params->conn);
 		hci_conn_put(params->conn);
 	}
 
-	list_del(&params->action);
 	list_del(&params->list);
 	kfree(params);
 }
@@ -2342,8 +2369,7 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
 			continue;
 		}
 
-		list_del(&params->list);
-		kfree(params);
+		hci_conn_params_free(params);
 	}
 
 	BT_DBG("All LE disabled connection parameters were removed");
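The helpers added above centralize the RCU discipline for the pend_le action lists: writers (still under hdev->lock) go through list_add_rcu()/list_del_rcu() plus synchronize_rcu(), while lockless readers walk the list inside rcu_read_lock(). A generic sketch of that same pattern, with made-up names; note the same caveat as the patched lookup, that the returned pointer is only safe to dereference if the caller holds its own reference or lock:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item {
	int key;
	struct list_head node;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);	/* plays the role of hdev->lock */

static void item_add(struct item *it)
{
	spin_lock(&items_lock);
	list_add_rcu(&it->node, &items);
	spin_unlock(&items_lock);
}

static void item_del(struct item *it)
{
	spin_lock(&items_lock);
	list_del_rcu(&it->node);
	spin_unlock(&items_lock);
	synchronize_rcu();	/* wait out readers before the entry is reused */
	INIT_LIST_HEAD(&it->node);
}

static struct item *item_lookup(int key)
{
	struct item *it;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &items, node) {
		if (it->key == key) {
			rcu_read_unlock();
			return it;
		}
	}
	rcu_read_unlock();
	return NULL;
}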
@@ -1564,7 +1564,7 @@ static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
 
 	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
 	if (params)
-		params->privacy_mode = cp->mode;
+		WRITE_ONCE(params->privacy_mode, cp->mode);
 
 	hci_dev_unlock(hdev);
 
@@ -2784,6 +2784,9 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
 			hci_enable_advertising(hdev);
 		}
 
+		/* Inform sockets conn is gone before we delete it */
+		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
+
 		goto done;
 	}
 
@@ -2804,8 +2807,8 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
 
 		case HCI_AUTO_CONN_DIRECT:
 		case HCI_AUTO_CONN_ALWAYS:
-			list_del_init(&params->action);
-			list_add(&params->action, &hdev->pend_le_conns);
+			hci_pend_le_list_del_init(params);
+			hci_pend_le_list_add(params, &hdev->pend_le_conns);
 			break;
 
 		default:
@@ -3423,8 +3426,8 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
 
 	case HCI_AUTO_CONN_DIRECT:
 	case HCI_AUTO_CONN_ALWAYS:
-		list_del_init(&params->action);
-		list_add(&params->action, &hdev->pend_le_conns);
+		hci_pend_le_list_del_init(params);
+		hci_pend_le_list_add(params, &hdev->pend_le_conns);
 		hci_update_passive_scan(hdev);
 		break;
 
@@ -5962,7 +5965,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
 					   conn->dst_type);
 	if (params) {
-		list_del_init(&params->action);
+		hci_pend_le_list_del_init(params);
 		if (params->conn) {
 			hci_conn_drop(params->conn);
 			hci_conn_put(params->conn);
@@ -2160,15 +2160,23 @@ static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
 	return 0;
 }
 
+struct conn_params {
+	bdaddr_t addr;
+	u8 addr_type;
+	hci_conn_flags_t flags;
+	u8 privacy_mode;
+};
+
 /* Adds connection to resolve list if needed.
  * Setting params to NULL programs local hdev->irk
  */
 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
-					struct hci_conn_params *params)
+					struct conn_params *params)
 {
 	struct hci_cp_le_add_to_resolv_list cp;
 	struct smp_irk *irk;
 	struct bdaddr_list_with_irk *entry;
+	struct hci_conn_params *p;
 
 	if (!use_ll_privacy(hdev))
 		return 0;
@@ -2203,6 +2211,16 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
 	/* Default privacy mode is always Network */
 	params->privacy_mode = HCI_NETWORK_PRIVACY;
 
+	rcu_read_lock();
+	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+				      &params->addr, params->addr_type);
+	if (!p)
+		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
+					      &params->addr, params->addr_type);
+	if (p)
+		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
+	rcu_read_unlock();
+
 done:
 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
 		memcpy(cp.local_irk, hdev->irk, 16);
@@ -2215,7 +2233,7 @@ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
 
 /* Set Device Privacy Mode. */
 static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
-					struct hci_conn_params *params)
+					struct conn_params *params)
 {
 	struct hci_cp_le_set_privacy_mode cp;
 	struct smp_irk *irk;
@@ -2240,6 +2258,8 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
 	bacpy(&cp.bdaddr, &irk->bdaddr);
 	cp.mode = HCI_DEVICE_PRIVACY;
 
+	/* Note: params->privacy_mode is not updated since it is a copy */
+
 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
 }
@@ -2249,7 +2269,7 @@ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
  * properly set the privacy mode.
  */
 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
-				       struct hci_conn_params *params,
+				       struct conn_params *params,
 				       u8 *num_entries)
 {
 	struct hci_cp_le_add_to_accept_list cp;
@@ -2447,6 +2467,52 @@ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
 	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
 }
 
+static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
+{
+	struct hci_conn_params *params;
+	struct conn_params *p;
+	size_t i;
+
+	rcu_read_lock();
+
+	i = 0;
+	list_for_each_entry_rcu(params, list, action)
+		++i;
+	*n = i;
+
+	rcu_read_unlock();
+
+	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
+	if (!p)
+		return NULL;
+
+	rcu_read_lock();
+
+	i = 0;
+	list_for_each_entry_rcu(params, list, action) {
+		/* Racing adds are handled in next scan update */
+		if (i >= *n)
+			break;
+
+		/* No hdev->lock, but: addr, addr_type are immutable.
+		 * privacy_mode is only written by us or in
+		 * hci_cc_le_set_privacy_mode that we wait for.
+		 * We should be idempotent so MGMT updating flags
+		 * while we are processing is OK.
+		 */
+		bacpy(&p[i].addr, &params->addr);
+		p[i].addr_type = params->addr_type;
+		p[i].flags = READ_ONCE(params->flags);
+		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
+		++i;
+	}
+
+	rcu_read_unlock();
+
+	*n = i;
+	return p;
+}
+
 /* Device must not be scanning when updating the accept list.
  *
  * Update is done using the following sequence:
@@ -2466,11 +2532,12 @@ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
  */
 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
 {
-	struct hci_conn_params *params;
+	struct conn_params *params;
 	struct bdaddr_list *b, *t;
 	u8 num_entries = 0;
 	bool pend_conn, pend_report;
 	u8 filter_policy;
+	size_t i, n;
 	int err;
 
 	/* Pause advertising if resolving list can be used as controllers
@@ -2504,6 +2571,7 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
 		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
 			continue;
 
+		/* Pointers not dereferenced, no locks needed */
 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
 						      &b->bdaddr,
 						      b->bdaddr_type);
@@ -2532,23 +2600,50 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
 	 * available accept list entries in the controller, then
 	 * just abort and return filer policy value to not use the
 	 * accept list.
+	 *
+	 * The list and params may be mutated while we wait for events,
+	 * so make a copy and iterate it.
 	 */
-	list_for_each_entry(params, &hdev->pend_le_conns, action) {
-		err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
-		if (err)
-			goto done;
+
+	params = conn_params_copy(&hdev->pend_le_conns, &n);
+	if (!params) {
+		err = -ENOMEM;
+		goto done;
 	}
 
+	for (i = 0; i < n; ++i) {
+		err = hci_le_add_accept_list_sync(hdev, &params[i],
+						  &num_entries);
+		if (err) {
+			kvfree(params);
+			goto done;
+		}
+	}
+
+	kvfree(params);
+
 	/* After adding all new pending connections, walk through
 	 * the list of pending reports and also add these to the
 	 * accept list if there is still space. Abort if space runs out.
 	 */
-	list_for_each_entry(params, &hdev->pend_le_reports, action) {
-		err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
-		if (err)
-			goto done;
+
+	params = conn_params_copy(&hdev->pend_le_reports, &n);
+	if (!params) {
+		err = -ENOMEM;
+		goto done;
 	}
 
+	for (i = 0; i < n; ++i) {
+		err = hci_le_add_accept_list_sync(hdev, &params[i],
+						  &num_entries);
+		if (err) {
+			kvfree(params);
+			goto done;
+		}
+	}
+
+	kvfree(params);
+
 	/* Use the allowlist unless the following conditions are all true:
 	 * - We are not currently suspending
 	 * - There are 1 or more ADV monitors registered and it's not offloaded
@@ -4837,12 +4932,12 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
 	struct hci_conn_params *p;
 
 	list_for_each_entry(p, &hdev->le_conn_params, list) {
+		hci_pend_le_list_del_init(p);
 		if (p->conn) {
 			hci_conn_drop(p->conn);
 			hci_conn_put(p->conn);
 			p->conn = NULL;
 		}
-		list_del_init(&p->action);
 	}
 
 	BT_DBG("All LE pending actions cleared");
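conn_params_copy() above snapshots an RCU-protected list into a plain array so the slow, event-waiting command loop can run without any lock while the list mutates underneath it. The general shape of that pattern, in a hedged generic sketch (all type and field names invented here):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	int key;
	int val;	/* may be written concurrently */
};

struct snap {
	int key;
	int val;
};

static struct snap *snapshot(struct list_head *list, size_t *n)
{
	struct item *it;
	struct snap *s;
	size_t i = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(it, list, node)
		++i;
	rcu_read_unlock();

	s = kvcalloc(i, sizeof(*s), GFP_KERNEL);	/* may sleep: no RCU held */
	if (!s)
		return NULL;

	*n = i;
	i = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(it, list, node) {
		if (i >= *n)	/* entries added since the count pass are */
			break;	/* simply picked up on the next update   */
		s[i].key = it->key;
		s[i].val = READ_ONCE(it->val);
		++i;
	}
	rcu_read_unlock();

	*n = i;		/* entries removed meanwhile shrink the count */
	return s;
}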
@@ -123,8 +123,11 @@ static struct iso_conn *iso_conn_add(struct hci_conn *hcon)
 {
 	struct iso_conn *conn = hcon->iso_data;
 
-	if (conn)
+	if (conn) {
+		if (!conn->hcon)
+			conn->hcon = hcon;
 		return conn;
+	}
 
 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 	if (!conn)
@@ -300,14 +303,13 @@ static int iso_connect_bis(struct sock *sk)
 		goto unlock;
 	}
 
-	hci_dev_unlock(hdev);
-	hci_dev_put(hdev);
+	lock_sock(sk);
 
 	err = iso_chan_add(conn, sk, NULL);
-	if (err)
-		return err;
-
-	lock_sock(sk);
+	if (err) {
+		release_sock(sk);
+		goto unlock;
+	}
 
 	/* Update source addr of the socket */
 	bacpy(&iso_pi(sk)->src, &hcon->src);
@@ -321,7 +323,6 @@ static int iso_connect_bis(struct sock *sk)
 	}
 
 	release_sock(sk);
-	return err;
 
 unlock:
 	hci_dev_unlock(hdev);
@@ -389,14 +390,13 @@ static int iso_connect_cis(struct sock *sk)
 		goto unlock;
 	}
 
-	hci_dev_unlock(hdev);
-	hci_dev_put(hdev);
+	lock_sock(sk);
 
 	err = iso_chan_add(conn, sk, NULL);
-	if (err)
-		return err;
-
-	lock_sock(sk);
+	if (err) {
+		release_sock(sk);
+		goto unlock;
+	}
 
 	/* Update source addr of the socket */
 	bacpy(&iso_pi(sk)->src, &hcon->src);
@@ -413,7 +413,6 @@ static int iso_connect_cis(struct sock *sk)
 	}
 
 	release_sock(sk);
-	return err;
 
 unlock:
 	hci_dev_unlock(hdev);
@@ -1072,8 +1071,8 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			    size_t len)
 {
 	struct sock *sk = sock->sk;
-	struct iso_conn *conn = iso_pi(sk)->conn;
 	struct sk_buff *skb, **frag;
+	size_t mtu;
 	int err;
 
 	BT_DBG("sock %p, sk %p", sock, sk);
@@ -1085,11 +1084,18 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	if (msg->msg_flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
-	if (sk->sk_state != BT_CONNECTED)
-		return -ENOTCONN;
+	lock_sock(sk);
+
+	if (sk->sk_state != BT_CONNECTED) {
+		release_sock(sk);
+		return -ENOTCONN;
+	}
+
+	mtu = iso_pi(sk)->conn->hcon->hdev->iso_mtu;
+
+	release_sock(sk);
 
-	skb = bt_skb_sendmsg(sk, msg, len, conn->hcon->hdev->iso_mtu,
-			     HCI_ISO_DATA_HDR_SIZE, 0);
+	skb = bt_skb_sendmsg(sk, msg, len, mtu, HCI_ISO_DATA_HDR_SIZE, 0);
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
@@ -1102,8 +1108,7 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	while (len) {
 		struct sk_buff *tmp;
 
-		tmp = bt_skb_sendmsg(sk, msg, len, conn->hcon->hdev->iso_mtu,
-				     0, 0);
+		tmp = bt_skb_sendmsg(sk, msg, len, mtu, 0, 0);
 		if (IS_ERR(tmp)) {
 			kfree_skb(skb);
 			return PTR_ERR(tmp);
@@ -1158,15 +1163,19 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 	BT_DBG("sk %p", sk);
 
 	if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+		lock_sock(sk);
 		switch (sk->sk_state) {
 		case BT_CONNECT2:
-			lock_sock(sk);
 			iso_conn_defer_accept(pi->conn->hcon);
 			sk->sk_state = BT_CONFIG;
 			release_sock(sk);
 			return 0;
 		case BT_CONNECT:
+			release_sock(sk);
 			return iso_connect_cis(sk);
+		default:
+			release_sock(sk);
+			break;
 		}
 	}
 
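The sendmsg fix above validates the socket state and samples the connection-derived MTU under lock_sock(), then drops the lock before the potentially blocking bt_skb_sendmsg() call. The idiom in a generic hedged sketch (some_mtu_of() and do_blocking_send() are hypothetical stand-ins, not real kernel APIs):

#include <net/sock.h>
#include <net/bluetooth/bluetooth.h>

static int send_with_snapshot(struct sock *sk, struct msghdr *msg, size_t len)
{
	size_t mtu;

	lock_sock(sk);
	if (sk->sk_state != BT_CONNECTED) {	/* validate under the lock */
		release_sock(sk);
		return -ENOTCONN;
	}
	mtu = some_mtu_of(sk);	/* hypothetical accessor; stands in for
				 * iso_pi(sk)->conn->hcon->hdev->iso_mtu */
	release_sock(sk);	/* don't hold the lock across blocking allocations */

	return do_blocking_send(sk, msg, len, mtu);	/* hypothetical */
}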
@@ -1297,15 +1297,15 @@ static void restart_le_actions(struct hci_dev *hdev)
 		/* Needed for AUTO_OFF case where might not "really"
 		 * have been powered off.
 		 */
-		list_del_init(&p->action);
+		hci_pend_le_list_del_init(p);
 
 		switch (p->auto_connect) {
 		case HCI_AUTO_CONN_DIRECT:
 		case HCI_AUTO_CONN_ALWAYS:
-			list_add(&p->action, &hdev->pend_le_conns);
+			hci_pend_le_list_add(p, &hdev->pend_le_conns);
 			break;
 		case HCI_AUTO_CONN_REPORT:
-			list_add(&p->action, &hdev->pend_le_reports);
+			hci_pend_le_list_add(p, &hdev->pend_le_reports);
 			break;
 		default:
 			break;
@@ -5169,7 +5169,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
 		goto unlock;
 	}
 
-	params->flags = current_flags;
+	WRITE_ONCE(params->flags, current_flags);
 	status = MGMT_STATUS_SUCCESS;
 
 	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
@@ -7285,7 +7285,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
 
 	bt_dev_dbg(hdev, "err %d", err);
 
-	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
+	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
 
 	status = mgmt_status(err);
 	if (status == MGMT_STATUS_SUCCESS) {
@@ -7580,7 +7580,7 @@ static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
 	if (params->auto_connect == auto_connect)
 		return 0;
 
-	list_del_init(&params->action);
+	hci_pend_le_list_del_init(params);
 
 	switch (auto_connect) {
 	case HCI_AUTO_CONN_DISABLED:
@@ -7589,18 +7589,18 @@ static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
 		 * connect to device, keep connecting.
 		 */
 		if (params->explicit_connect)
-			list_add(&params->action, &hdev->pend_le_conns);
+			hci_pend_le_list_add(params, &hdev->pend_le_conns);
 		break;
 	case HCI_AUTO_CONN_REPORT:
 		if (params->explicit_connect)
-			list_add(&params->action, &hdev->pend_le_conns);
+			hci_pend_le_list_add(params, &hdev->pend_le_conns);
 		else
-			list_add(&params->action, &hdev->pend_le_reports);
+			hci_pend_le_list_add(params, &hdev->pend_le_reports);
 		break;
 	case HCI_AUTO_CONN_DIRECT:
 	case HCI_AUTO_CONN_ALWAYS:
 		if (!is_connected(hdev, addr, addr_type))
-			list_add(&params->action, &hdev->pend_le_conns);
+			hci_pend_le_list_add(params, &hdev->pend_le_conns);
 		break;
 	}
 
@@ -7823,9 +7823,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
 			goto unlock;
 		}
 
-		list_del(&params->action);
-		list_del(&params->list);
-		kfree(params);
+		hci_conn_params_free(params);
 
 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
 	} else {
@@ -7856,9 +7854,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
 				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
 				continue;
 			}
-			list_del(&p->action);
-			list_del(&p->list);
-			kfree(p);
+			hci_conn_params_free(p);
 		}
 
 		bt_dev_dbg(hdev, "All LE connection parameters were removed");
@@ -126,8 +126,11 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
 	struct hci_dev *hdev = hcon->hdev;
 	struct sco_conn *conn = hcon->sco_data;
 
-	if (conn)
+	if (conn) {
+		if (!conn->hcon)
+			conn->hcon = hcon;
 		return conn;
+	}
 
 	conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
 	if (!conn)
@@ -268,21 +271,21 @@ static int sco_connect(struct sock *sk)
 		goto unlock;
 	}
 
-	hci_dev_unlock(hdev);
-	hci_dev_put(hdev);
-
 	conn = sco_conn_add(hcon);
 	if (!conn) {
 		hci_conn_drop(hcon);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto unlock;
 	}
 
-	err = sco_chan_add(conn, sk, NULL);
-	if (err)
-		return err;
-
 	lock_sock(sk);
 
+	err = sco_chan_add(conn, sk, NULL);
+	if (err) {
+		release_sock(sk);
+		goto unlock;
+	}
+
 	/* Update source addr of the socket */
 	bacpy(&sco_pi(sk)->src, &hcon->src);
 
@@ -296,8 +299,6 @@ static int sco_connect(struct sock *sk)
 
 	release_sock(sk);
 
-	return err;
-
 unlock:
 	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
@@ -1526,6 +1526,12 @@ static int bcm_release(struct socket *sock)
 
 	lock_sock(sk);
 
+#if IS_ENABLED(CONFIG_PROC_FS)
+	/* remove procfs entry */
+	if (net->can.bcmproc_dir && bo->bcm_proc_read)
+		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
+#endif /* CONFIG_PROC_FS */
+
 	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
 		bcm_remove_op(op);
 
@@ -1561,12 +1567,6 @@ static int bcm_release(struct socket *sock)
 	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
 		bcm_remove_op(op);
 
-#if IS_ENABLED(CONFIG_PROC_FS)
-	/* remove procfs entry */
-	if (net->can.bcmproc_dir && bo->bcm_proc_read)
-		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
-#endif /* CONFIG_PROC_FS */
-
 	/* remove device reference */
 	if (bo->bound) {
 		bo->bound = 0;
@@ -84,6 +84,7 @@ struct raw_sock {
 	struct sock sk;
 	int bound;
 	int ifindex;
+	struct net_device *dev;
 	struct list_head notifier;
 	int loopback;
 	int recv_own_msgs;
@@ -277,7 +278,7 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg,
 	if (!net_eq(dev_net(dev), sock_net(sk)))
 		return;
 
-	if (ro->ifindex != dev->ifindex)
+	if (ro->dev != dev)
 		return;
 
 	switch (msg) {
@@ -292,6 +293,7 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg,
 
 		ro->ifindex = 0;
 		ro->bound = 0;
+		ro->dev = NULL;
 		ro->count = 0;
 		release_sock(sk);
 
@@ -337,6 +339,7 @@ static int raw_init(struct sock *sk)
 
 	ro->bound = 0;
 	ro->ifindex = 0;
+	ro->dev = NULL;
 
 	/* set default filter to single entry dfilter */
 	ro->dfilter.can_id = 0;
@@ -385,19 +388,13 @@ static int raw_release(struct socket *sock)
 
 	lock_sock(sk);
 
+	rtnl_lock();
 	/* remove current filters & unregister */
 	if (ro->bound) {
-		if (ro->ifindex) {
-			struct net_device *dev;
-
-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
-			if (dev) {
-				raw_disable_allfilters(dev_net(dev), dev, sk);
-				dev_put(dev);
-			}
-		} else {
+		if (ro->dev)
+			raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
+		else
 			raw_disable_allfilters(sock_net(sk), NULL, sk);
-		}
 	}
 
 	if (ro->count > 1)
@@ -405,8 +402,10 @@ static int raw_release(struct socket *sock)
 
 	ro->ifindex = 0;
 	ro->bound = 0;
+	ro->dev = NULL;
 	ro->count = 0;
 	free_percpu(ro->uniq);
+	rtnl_unlock();
 
 	sock_orphan(sk);
 	sock->sk = NULL;
@@ -422,6 +421,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
 	struct sock *sk = sock->sk;
 	struct raw_sock *ro = raw_sk(sk);
+	struct net_device *dev = NULL;
 	int ifindex;
 	int err = 0;
 	int notify_enetdown = 0;
@@ -431,14 +431,13 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 	if (addr->can_family != AF_CAN)
 		return -EINVAL;
 
+	rtnl_lock();
 	lock_sock(sk);
 
 	if (ro->bound && addr->can_ifindex == ro->ifindex)
 		goto out;
 
 	if (addr->can_ifindex) {
-		struct net_device *dev;
-
 		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
 		if (!dev) {
 			err = -ENODEV;
@@ -467,26 +466,20 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 	if (!err) {
 		if (ro->bound) {
 			/* unregister old filters */
-			if (ro->ifindex) {
-				struct net_device *dev;
-
-				dev = dev_get_by_index(sock_net(sk),
-						       ro->ifindex);
-				if (dev) {
-					raw_disable_allfilters(dev_net(dev),
-							       dev, sk);
-					dev_put(dev);
-				}
-			} else {
+			if (ro->dev)
+				raw_disable_allfilters(dev_net(ro->dev),
						       ro->dev, sk);
+			else
 				raw_disable_allfilters(sock_net(sk), NULL, sk);
-			}
 		}
 		ro->ifindex = ifindex;
 		ro->bound = 1;
+		ro->dev = dev;
 	}
 
 out:
 	release_sock(sk);
+	rtnl_unlock();
 
 	if (notify_enetdown) {
 		sk->sk_err = ENETDOWN;
@@ -553,9 +546,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		rtnl_lock();
 		lock_sock(sk);
 
-		if (ro->bound && ro->ifindex) {
-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
-			if (!dev) {
+		dev = ro->dev;
+		if (ro->bound && dev) {
+			if (dev->reg_state != NETREG_REGISTERED) {
 				if (count > 1)
 					kfree(filter);
 				err = -ENODEV;
@@ -596,7 +589,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		ro->count = count;
 
  out_fil:
-		dev_put(dev);
 		release_sock(sk);
 		rtnl_unlock();
 
@@ -614,9 +606,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		rtnl_lock();
 		lock_sock(sk);
 
-		if (ro->bound && ro->ifindex) {
-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
-			if (!dev) {
+		dev = ro->dev;
+		if (ro->bound && dev) {
+			if (dev->reg_state != NETREG_REGISTERED) {
 				err = -ENODEV;
 				goto out_err;
 			}
@@ -640,7 +632,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		ro->err_mask = err_mask;
 
  out_err:
-		dev_put(dev);
 		release_sock(sk);
 		rtnl_unlock();
 
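The can_raw changes replace repeated dev_get_by_index()/dev_put() lookups with a cached ro->dev pointer: it is only ever written with both RTNL and the socket lock held, and the netdev notifier clears it on unregister, so a reader holding RTNL can use the pointer without taking a device reference. A hedged sketch of that invariant with invented names (not the actual raw_sock code):

#include <linux/netdevice.h>
#include <net/sock.h>

struct my_raw_sock {
	struct net_device *dev;
	int bound;
};

static void my_bind(struct sock *sk, struct my_raw_sock *ro,
		    struct net_device *dev)
{
	ASSERT_RTNL();		/* caller took rtnl_lock() */
	lock_sock(sk);
	ro->dev = dev;		/* RTNL blocks concurrent unregistration */
	ro->bound = 1;
	release_sock(sk);
}

static void my_notify(struct sock *sk, struct my_raw_sock *ro,
		      struct net_device *dev, unsigned long msg)
{
	if (ro->dev != dev)	/* pointer compare replaces ifindex compare */
		return;
	if (msg == NETDEV_UNREGISTER) {
		lock_sock(sk);
		ro->dev = NULL;	/* cached pointer dies with the device */
		ro->bound = 0;
		release_sock(sk);
	}
}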
@@ -1132,7 +1132,7 @@ static int esp_init_authenc(struct xfrm_state *x,
 	err = crypto_aead_setkey(aead, key, keylen);
 
 free_key:
-	kfree(key);
+	kfree_sensitive(key);
 
 error:
 	return err;
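The hunk matters because the freed buffer held live IPsec key material. kfree_sensitive() zeroes the memory (via memzero_explicit()) before handing it back to the allocator, so the key bytes don't linger in freed slab pages. Illustrative sketch:

#include <linux/slab.h>
#include <linux/types.h>

/* Sketch only: free a buffer that held secrets. */
static void drop_key(u8 *key)
{
	kfree_sensitive(key);	/* zero-then-free; safe on NULL like kfree() */
}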
@@ -1019,7 +1019,7 @@ static void reqsk_timer_handler(struct timer_list *t)
 
 	icsk = inet_csk(sk_listener);
 	net = sock_net(sk_listener);
-	max_syn_ack_retries = icsk->icsk_syn_retries ? :
+	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
 		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
 	/* Normally all the openreqs are young and become mature
 	 * (i.e. converted to established socket) for first timeout.
@@ -650,20 +650,8 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 	spin_lock(lock);
 	if (osk) {
 		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
-		ret = sk_hashed(osk);
-		if (ret) {
-			/* Before deleting the node, we insert a new one to make
-			 * sure that the look-up-sk process would not miss either
-			 * of them and that at least one node would exist in ehash
-			 * table all the time. Otherwise there's a tiny chance
-			 * that lookup process could find nothing in ehash table.
-			 */
-			__sk_nulls_add_node_tail_rcu(sk, list);
-			sk_nulls_del_node_init_rcu(osk);
-		}
-		goto unlock;
-	}
-	if (found_dup_sk) {
+		ret = sk_nulls_del_node_init_rcu(osk);
+	} else if (found_dup_sk) {
 		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
 		if (*found_dup_sk)
 			ret = false;
@@ -672,7 +660,6 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 	if (ret)
 		__sk_nulls_add_node_rcu(sk, list);
 
-unlock:
 	spin_unlock(lock);
 
 	return ret;
@@ -88,10 +88,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
-static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
-					struct hlist_nulls_head *list)
+static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+				   struct hlist_nulls_head *list)
 {
-	hlist_nulls_add_tail_rcu(&tw->tw_node, list);
+	hlist_nulls_add_head_rcu(&tw->tw_node, list);
 }
 
 static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
@@ -144,7 +144,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 
 	spin_lock(lock);
 
-	inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
+	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
 	if (__sk_nulls_del_node_init_rcu(sk))
@@ -548,7 +548,8 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_free_skb;
 
 	if (skb->len > dev->mtu + dev->hard_header_len) {
-		pskb_trim(skb, dev->mtu + dev->hard_header_len);
+		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
+			goto err_free_skb;
 		truncate = true;
 	}
 
@@ -689,7 +690,8 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 		goto free_skb;
 
 	if (skb->len > dev->mtu + dev->hard_header_len) {
-		pskb_trim(skb, dev->mtu + dev->hard_header_len);
+		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
+			goto free_skb;
 		truncate = true;
 	}
 
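pskb_trim() can fail (for example it may need to reallocate on nonlinear or cloned skbs), so its return value must be checked; the two hunks above turn a silently ignored failure into a drop. The shape of the fix as a self-contained hedged sketch (invented function name; the real ones are the erspan xmit paths above):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t xmit_trimmed(struct sk_buff *skb, struct net_device *dev)
{
	bool truncate = false;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		/* pskb_trim() may fail; never continue with an inconsistent skb */
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto drop;
		truncate = true;
	}
	/* ... build headers, transmit, honour 'truncate' ... */
	return NETDEV_TX_OK;

drop:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}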
@@ -3291,7 +3291,7 @@ int tcp_sock_set_syncnt(struct sock *sk, int val)
 		return -EINVAL;
 
 	lock_sock(sk);
-	inet_csk(sk)->icsk_syn_retries = val;
+	WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val);
 	release_sock(sk);
 	return 0;
 }
@@ -3300,7 +3300,7 @@ EXPORT_SYMBOL(tcp_sock_set_syncnt);
 void tcp_sock_set_user_timeout(struct sock *sk, u32 val)
 {
 	lock_sock(sk);
-	inet_csk(sk)->icsk_user_timeout = val;
+	WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val);
 	release_sock(sk);
 }
 EXPORT_SYMBOL(tcp_sock_set_user_timeout);
@@ -3312,7 +3312,8 @@ int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
 	if (val < 1 || val > MAX_TCP_KEEPIDLE)
 		return -EINVAL;
 
-	tp->keepalive_time = val * HZ;
+	/* Paired with WRITE_ONCE() in keepalive_time_when() */
+	WRITE_ONCE(tp->keepalive_time, val * HZ);
 	if (sock_flag(sk, SOCK_KEEPOPEN) &&
 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
 		u32 elapsed = keepalive_time_elapsed(tp);
@@ -3344,7 +3345,7 @@ int tcp_sock_set_keepintvl(struct sock *sk, int val)
 		return -EINVAL;
 
 	lock_sock(sk);
-	tcp_sk(sk)->keepalive_intvl = val * HZ;
+	WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
 	release_sock(sk);
 	return 0;
 }
@@ -3356,7 +3357,8 @@ int tcp_sock_set_keepcnt(struct sock *sk, int val)
 		return -EINVAL;
 
 	lock_sock(sk);
-	tcp_sk(sk)->keepalive_probes = val;
+	/* Paired with READ_ONCE() in keepalive_probes() */
+	WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
 	release_sock(sk);
 	return 0;
 }
@@ -3558,19 +3560,19 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
 		if (val < 1 || val > MAX_TCP_KEEPINTVL)
 			err = -EINVAL;
 		else
-			tp->keepalive_intvl = val * HZ;
+			WRITE_ONCE(tp->keepalive_intvl, val * HZ);
 		break;
 	case TCP_KEEPCNT:
 		if (val < 1 || val > MAX_TCP_KEEPCNT)
 			err = -EINVAL;
 		else
-			tp->keepalive_probes = val;
+			WRITE_ONCE(tp->keepalive_probes, val);
 		break;
 	case TCP_SYNCNT:
 		if (val < 1 || val > MAX_TCP_SYNCNT)
 			err = -EINVAL;
 		else
-			icsk->icsk_syn_retries = val;
+			WRITE_ONCE(icsk->icsk_syn_retries, val);
 		break;
 
 	case TCP_SAVE_SYN:
@@ -3583,18 +3585,18 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
 
 	case TCP_LINGER2:
 		if (val < 0)
-			tp->linger2 = -1;
+			WRITE_ONCE(tp->linger2, -1);
 		else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
-			tp->linger2 = TCP_FIN_TIMEOUT_MAX;
+			WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
 		else
-			tp->linger2 = val * HZ;
+			WRITE_ONCE(tp->linger2, val * HZ);
 		break;
 
 	case TCP_DEFER_ACCEPT:
 		/* Translate value in seconds to number of retransmits */
-		icsk->icsk_accept_queue.rskq_defer_accept =
-			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
-					TCP_RTO_MAX / HZ);
+		WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
+			   secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
					   TCP_RTO_MAX / HZ));
 		break;
 
 	case TCP_WINDOW_CLAMP:
@@ -3618,7 +3620,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
 		if (val < 0)
 			err = -EINVAL;
 		else
-			icsk->icsk_user_timeout = val;
+			WRITE_ONCE(icsk->icsk_user_timeout, val);
 		break;
 
 	case TCP_FASTOPEN:
@@ -3656,13 +3658,13 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
 		if (!tp->repair)
 			err = -EPERM;
 		else
-			tp->tsoffset = val - tcp_time_stamp_raw();
+			WRITE_ONCE(tp->tsoffset, val - tcp_time_stamp_raw());
 		break;
 	case TCP_REPAIR_WINDOW:
 		err = tcp_repair_set_window(tp, optval, optlen);
 		break;
 	case TCP_NOTSENT_LOWAT:
-		tp->notsent_lowat = val;
+		WRITE_ONCE(tp->notsent_lowat, val);
 		sk->sk_write_space(sk);
 		break;
 	case TCP_INQ:
@@ -3674,7 +3676,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
 	case TCP_TX_DELAY:
 		if (val)
 			tcp_enable_tx_delay();
-		tp->tcp_tx_delay = val;
+		WRITE_ONCE(tp->tcp_tx_delay, val);
 		break;
 	default:
 		err = -ENOPROTOOPT;
@@ -3991,17 +3993,18 @@ int do_tcp_getsockopt(struct sock *sk, int level,
 		val = keepalive_probes(tp);
 		break;
 	case TCP_SYNCNT:
-		val = icsk->icsk_syn_retries ? :
+		val = READ_ONCE(icsk->icsk_syn_retries) ? :
 			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
 		break;
 	case TCP_LINGER2:
-		val = tp->linger2;
+		val = READ_ONCE(tp->linger2);
 		if (val >= 0)
 			val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
-		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
-				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
+		val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
+		val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
+				      TCP_RTO_MAX / HZ);
 		break;
 	case TCP_WINDOW_CLAMP:
 		val = tp->window_clamp;
@@ -4138,11 +4141,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_USER_TIMEOUT:
-		val = icsk->icsk_user_timeout;
+		val = READ_ONCE(icsk->icsk_user_timeout);
 		break;
 
 	case TCP_FASTOPEN:
-		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
+		val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);
 		break;
 
 	case TCP_FASTOPEN_CONNECT:
@@ -4154,14 +4157,14 @@ int do_tcp_getsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_TX_DELAY:
-		val = tp->tcp_tx_delay;
+		val = READ_ONCE(tp->tcp_tx_delay);
 		break;
 
 	case TCP_TIMESTAMP:
-		val = tcp_time_stamp_raw() + tp->tsoffset;
+		val = tcp_time_stamp_raw() + READ_ONCE(tp->tsoffset);
 		break;
 	case TCP_NOTSENT_LOWAT:
-		val = tp->notsent_lowat;
+		val = READ_ONCE(tp->notsent_lowat);
 		break;
 	case TCP_INQ:
 		val = tp->recvmsg_inq;
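All of these setsockopt/getsockopt changes follow one pattern: fields that lockless readers (getsockopt paths, timers, fast paths) may observe concurrently are stored with WRITE_ONCE() and loaded with READ_ONCE(). That marks the race as intentional for KCSAN and stops the compiler from tearing, caching, or re-reading the value. The pattern in miniature (types invented for the sketch):

#include <linux/compiler.h>

struct cfg {
	int timeout;	/* written under a lock, read locklessly */
};

/* Writer side: holds the lock, but readers don't, so annotate the store. */
static void cfg_set_timeout(struct cfg *c, int val)
{
	WRITE_ONCE(c->timeout, val);
}

/* Reader side: one annotated load, used as a consistent snapshot. */
static int cfg_get_timeout(const struct cfg *c)
{
	return READ_ONCE(c->timeout);
}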
@@ -296,6 +296,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 static bool tcp_fastopen_queue_check(struct sock *sk)
 {
 	struct fastopen_queue *fastopenq;
+	int max_qlen;
 
 	/* Make sure the listener has enabled fastopen, and we don't
 	 * exceed the max # of pending TFO requests allowed before trying
@@ -308,10 +309,11 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 	 * temporarily vs a server not supporting Fast Open at all.
 	 */
 	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
-	if (fastopenq->max_qlen == 0)
+	max_qlen = READ_ONCE(fastopenq->max_qlen);
+	if (max_qlen == 0)
 		return false;
 
-	if (fastopenq->qlen >= fastopenq->max_qlen) {
+	if (fastopenq->qlen >= max_qlen) {
 		struct request_sock *req1;
 		spin_lock(&fastopenq->lock);
 		req1 = fastopenq->rskq_rst_head;
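The fastopen hunk also shows why the local variable matters: max_qlen is loaded once and reused, so the zero-check and the queue-full check cannot observe two different values if a writer races in between. Generic hedged sketch (struct and names invented):

#include <linux/compiler.h>

struct queue {
	int len;
	int max_len;	/* may be changed concurrently by setsockopt-like code */
};

/* Snapshot a racy limit once per decision. */
static bool queue_has_room(const struct queue *q)
{
	int max = READ_ONCE(q->max_len);	/* one load, two uses */

	if (max == 0)
		return false;
	return q->len < max;
}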
@@ -307,8 +307,9 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 							  inet->inet_daddr,
 							  inet->inet_sport,
 							  usin->sin_port));
-		tp->tsoffset = secure_tcp_ts_off(net, inet->inet_saddr,
-						 inet->inet_daddr);
+		WRITE_ONCE(tp->tsoffset,
+			   secure_tcp_ts_off(net, inet->inet_saddr,
+					     inet->inet_daddr));
 	}
 
 	inet->inet_id = get_random_u16();
@@ -988,11 +989,12 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
-			req->ts_recent,
+			READ_ONCE(req->ts_recent),
 			0,
 			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
-			ip_hdr(skb)->tos, tcp_rsk(req)->txhash);
+			ip_hdr(skb)->tos,
+			READ_ONCE(tcp_rsk(req)->txhash));
 }
 
 /*
@@ -528,7 +528,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
 
 	newtp->lsndtime = tcp_jiffies32;
-	newsk->sk_txhash = treq->txhash;
+	newsk->sk_txhash = READ_ONCE(treq->txhash);
 	newtp->total_retrans = req->num_retrans;
 
 	tcp_init_xmit_timers(newsk);
@@ -555,7 +555,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 	newtp->max_window = newtp->snd_wnd;
 
 	if (newtp->rx_opt.tstamp_ok) {
-		newtp->rx_opt.ts_recent = req->ts_recent;
+		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
 		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
 		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
 	} else {
@@ -619,7 +619,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
 
 		if (tmp_opt.saw_tstamp) {
-			tmp_opt.ts_recent = req->ts_recent;
+			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
 			if (tmp_opt.rcv_tsecr)
 				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
 			/* We do not store true stamp, but it is not required,
@@ -758,8 +758,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 
 	/* In sequence, PAWS is OK. */
 
+	/* TODO: We probably should defer ts_recent change once
+	 * we take ownership of @req.
+	 */
 	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
-		req->ts_recent = tmp_opt.rcv_tsval;
+		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
 
 	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
 		/* Truncate SYN, it is out of window starting
@@ -878,7 +878,7 @@ static unsigned int tcp_synack_options(const struct sock *sk,
 	if (likely(ireq->tstamp_ok)) {
 		opts->options |= OPTION_TS;
 		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
-		opts->tsecr = req->ts_recent;
+		opts->tsecr = READ_ONCE(req->ts_recent);
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
 	if (likely(ireq->sack_ok)) {
@@ -3660,7 +3660,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	rcu_read_lock();
 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
 #endif
-	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
+	skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
 	/* bpf program will be interested in the tcp_flags */
 	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
@@ -4210,7 +4210,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 
 	/* Paired with WRITE_ONCE() in sock_setsockopt() */
 	if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED)
-		tcp_rsk(req)->txhash = net_tx_rndhash();
+		WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash());
 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
 				  NULL);
 	if (!res) {
@ -274,13 +274,20 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
|
|||||||
__sum16 check;
|
__sum16 check;
|
||||||
__be16 newlen;
|
__be16 newlen;
|
||||||
|
|
||||||
if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
|
|
||||||
return __udp_gso_segment_list(gso_skb, features, is_ipv6);
|
|
||||||
|
|
||||||
mss = skb_shinfo(gso_skb)->gso_size;
|
mss = skb_shinfo(gso_skb)->gso_size;
|
||||||
if (gso_skb->len <= sizeof(*uh) + mss)
|
if (gso_skb->len <= sizeof(*uh) + mss)
|
||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
|
if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
|
||||||
|
/* Packet is from an untrusted source, reset gso_segs. */
|
||||||
|
skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
|
||||||
|
mss);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
|
||||||
|
return __udp_gso_segment_list(gso_skb, features, is_ipv6);
|
||||||
|
|
||||||
skb_pull(gso_skb, sizeof(*uh));
|
skb_pull(gso_skb, sizeof(*uh));
|
||||||
|
|
||||||
/* clear destructor to avoid skb_segment assigning it to tail */
|
/* clear destructor to avoid skb_segment assigning it to tail */
|
||||||
@ -388,8 +395,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
|
|||||||
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
|
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
|
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
|
||||||
!skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
|
|
||||||
return __udp_gso_segment(skb, features, false);
|
return __udp_gso_segment(skb, features, false);
|
||||||
|
|
||||||
mss = skb_shinfo(skb)->gso_size;
|
mss = skb_shinfo(skb)->gso_size;
|
||||||
|
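This is the "fall back to SW segmenting with GSO_UDP_L4 dodgy bit set" fix from the cover letter: UDP_L4 packets now always enter __udp_gso_segment(), which either recomputes gso_segs from the payload length (when the device can take the packet as-is) or segments in software, instead of trusting a gso_segs value supplied by an untrusted source. The arithmetic it relies on is just a ceiling division; a tiny userspace check with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int udp_hdr_len = 8;	/* sizeof(struct udphdr) */
	unsigned int pkt_len = 3008;	/* UDP header + 3000 bytes payload */
	unsigned int mss = 1000;	/* gso_size advertised by the sender */

	/* 3000 bytes of payload at 1000 bytes per segment -> 3 segments,
	 * regardless of what the sender claimed in gso_segs.
	 */
	printf("gso_segs = %u\n", DIV_ROUND_UP(pkt_len - udp_hdr_len, mss));
	return 0;
}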
@@ -955,7 +955,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 		goto tx_err;

 	if (skb->len > dev->mtu + dev->hard_header_len) {
-		pskb_trim(skb, dev->mtu + dev->hard_header_len);
+		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
+			goto tx_err;
 		truncate = true;
 	}

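pskb_trim() can fail (trimming a nonlinear skb may have to reallocate), so its result now routes to the error path instead of being ignored and leaving a half-trimmed packet in flight. A generic userspace sketch of the same discipline, with an invented buffer type and trim helper:

#include <stdio.h>
#include <stdlib.h>

struct buf {
	char *data;
	size_t len;
};

static int buf_trim(struct buf *b, size_t new_len)
{
	char *p = realloc(b->data, new_len);	/* can fail, like pskb_trim() */

	if (!p && new_len)
		return -1;
	b->data = p;
	b->len = new_len;
	return 0;
}

int main(void)
{
	struct buf b = { .data = malloc(64), .len = 64 };
	int truncated = 0;

	if (!b.data || (b.len > 32 && buf_trim(&b, 32)))
		goto tx_err;			/* propagate, don't ignore */
	truncated = 1;
	printf("len=%zu truncated=%d\n", b.len, truncated);
	free(b.data);
	return 0;

tx_err:
	free(b.data);
	fprintf(stderr, "drop\n");
	return 1;
}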
@@ -1126,10 +1126,10 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
-			req->ts_recent, sk->sk_bound_dev_if,
+			READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
 			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
-			tcp_rsk(req)->txhash);
+			READ_ONCE(tcp_rsk(req)->txhash));
 }

@@ -43,8 +43,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 		goto out;

-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
-	    !skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
 		return __udp_gso_segment(skb, features, true);

 	mss = skb_shinfo(skb)->gso_size;
@@ -402,7 +402,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	memcpy(laddr.mac, addr->sllc_mac, IFHWADDRLEN);
 	laddr.lsap = addr->sllc_sap;
 	rc = -EADDRINUSE; /* mac + sap clash. */
-	ask = llc_lookup_established(sap, &daddr, &laddr);
+	ask = llc_lookup_established(sap, &daddr, &laddr, &init_net);
 	if (ask) {
 		sock_put(ask);
 		goto out_put;
@@ -453,11 +453,13 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
 static inline bool llc_estab_match(const struct llc_sap *sap,
 				   const struct llc_addr *daddr,
 				   const struct llc_addr *laddr,
-				   const struct sock *sk)
+				   const struct sock *sk,
+				   const struct net *net)
 {
 	struct llc_sock *llc = llc_sk(sk);

-	return llc->laddr.lsap == laddr->lsap &&
+	return net_eq(sock_net(sk), net) &&
+	       llc->laddr.lsap == laddr->lsap &&
 	       llc->daddr.lsap == daddr->lsap &&
 	       ether_addr_equal(llc->laddr.mac, laddr->mac) &&
 	       ether_addr_equal(llc->daddr.mac, daddr->mac);
@@ -468,6 +470,7 @@ static inline bool llc_estab_match(const struct llc_sap *sap,
  * @sap: SAP
  * @daddr: address of remote LLC (MAC + SAP)
  * @laddr: address of local LLC (MAC + SAP)
+ * @net: netns to look up a socket in
  *
  * Search connection list of the SAP and finds connection using the remote
  * mac, remote sap, local mac, and local sap. Returns pointer for
@@ -476,7 +479,8 @@ static inline bool llc_estab_match(const struct llc_sap *sap,
  */
 static struct sock *__llc_lookup_established(struct llc_sap *sap,
 					     struct llc_addr *daddr,
-					     struct llc_addr *laddr)
+					     struct llc_addr *laddr,
+					     const struct net *net)
 {
 	struct sock *rc;
 	struct hlist_nulls_node *node;
@@ -486,12 +490,12 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
 	rcu_read_lock();
again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
-		if (llc_estab_match(sap, daddr, laddr, rc)) {
+		if (llc_estab_match(sap, daddr, laddr, rc, net)) {
 			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
 			if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
-				     !llc_estab_match(sap, daddr, laddr, rc))) {
+				     !llc_estab_match(sap, daddr, laddr, rc, net))) {
 				sock_put(rc);
 				continue;
 			}
@@ -513,29 +517,33 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,

 struct sock *llc_lookup_established(struct llc_sap *sap,
 				    struct llc_addr *daddr,
-				    struct llc_addr *laddr)
+				    struct llc_addr *laddr,
+				    const struct net *net)
 {
 	struct sock *sk;

 	local_bh_disable();
-	sk = __llc_lookup_established(sap, daddr, laddr);
+	sk = __llc_lookup_established(sap, daddr, laddr, net);
 	local_bh_enable();
 	return sk;
 }

 static inline bool llc_listener_match(const struct llc_sap *sap,
 				      const struct llc_addr *laddr,
-				      const struct sock *sk)
+				      const struct sock *sk,
+				      const struct net *net)
 {
 	struct llc_sock *llc = llc_sk(sk);

-	return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
+	return net_eq(sock_net(sk), net) &&
+	       sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
 	       llc->laddr.lsap == laddr->lsap &&
 	       ether_addr_equal(llc->laddr.mac, laddr->mac);
 }

 static struct sock *__llc_lookup_listener(struct llc_sap *sap,
-					  struct llc_addr *laddr)
+					  struct llc_addr *laddr,
+					  const struct net *net)
 {
 	struct sock *rc;
 	struct hlist_nulls_node *node;
@@ -545,12 +553,12 @@ static struct sock *__llc_lookup_listener(struct llc_sap *sap,
 	rcu_read_lock();
again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
-		if (llc_listener_match(sap, laddr, rc)) {
+		if (llc_listener_match(sap, laddr, rc, net)) {
 			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
 			if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
-				     !llc_listener_match(sap, laddr, rc))) {
+				     !llc_listener_match(sap, laddr, rc, net))) {
 				sock_put(rc);
 				continue;
 			}
@@ -574,6 +582,7 @@ static struct sock *__llc_lookup_listener(struct llc_sap *sap,
 * llc_lookup_listener - Finds listener for local MAC + SAP
  * @sap: SAP
  * @laddr: address of local LLC (MAC + SAP)
+ * @net: netns to look up a socket in
  *
  * Search connection list of the SAP and finds connection listening on
  * local mac, and local sap. Returns pointer for parent socket found,
@@ -581,24 +590,26 @@ static struct sock *__llc_lookup_listener(struct llc_sap *sap,
  * Caller has to make sure local_bh is disabled.
  */
 static struct sock *llc_lookup_listener(struct llc_sap *sap,
-					struct llc_addr *laddr)
+					struct llc_addr *laddr,
+					const struct net *net)
 {
+	struct sock *rc = __llc_lookup_listener(sap, laddr, net);
 	static struct llc_addr null_addr;
-	struct sock *rc = __llc_lookup_listener(sap, laddr);

 	if (!rc)
-		rc = __llc_lookup_listener(sap, &null_addr);
+		rc = __llc_lookup_listener(sap, &null_addr, net);

 	return rc;
 }

 static struct sock *__llc_lookup(struct llc_sap *sap,
 				 struct llc_addr *daddr,
-				 struct llc_addr *laddr)
+				 struct llc_addr *laddr,
+				 const struct net *net)
 {
-	struct sock *sk = __llc_lookup_established(sap, daddr, laddr);
+	struct sock *sk = __llc_lookup_established(sap, daddr, laddr, net);

-	return sk ? : llc_lookup_listener(sap, laddr);
+	return sk ? : llc_lookup_listener(sap, laddr, net);
 }

 /**
@@ -776,7 +787,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
 	llc_pdu_decode_da(skb, daddr.mac);
 	llc_pdu_decode_dsap(skb, &daddr.lsap);

-	sk = __llc_lookup(sap, &saddr, &daddr);
+	sk = __llc_lookup(sap, &saddr, &daddr, dev_net(skb->dev));
 	if (!sk)
 		goto drop;

@@ -92,7 +92,7 @@ int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac, u8 dsap)
 	daddr.lsap = dsap;
 	memcpy(daddr.mac, dmac, sizeof(daddr.mac));
 	memcpy(laddr.mac, lmac, sizeof(laddr.mac));
-	existing = llc_lookup_established(llc->sap, &daddr, &laddr);
+	existing = llc_lookup_established(llc->sap, &daddr, &laddr, sock_net(sk));
 	if (existing) {
 		if (existing->sk_state == TCP_ESTABLISHED) {
 			sk = existing;
@@ -163,9 +163,6 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
 	void (*sta_handler)(struct sk_buff *skb);
 	void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);

-	if (!net_eq(dev_net(dev), &init_net))
-		goto drop;
-
 	/*
 	 * When the interface is in promisc. mode, drop all the crap that it
 	 * receives, do not try to analyse it.
@@ -294,25 +294,29 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,

 static inline bool llc_dgram_match(const struct llc_sap *sap,
 				   const struct llc_addr *laddr,
-				   const struct sock *sk)
+				   const struct sock *sk,
+				   const struct net *net)
 {
 	struct llc_sock *llc = llc_sk(sk);

 	return sk->sk_type == SOCK_DGRAM &&
-	       llc->laddr.lsap == laddr->lsap &&
-	       ether_addr_equal(llc->laddr.mac, laddr->mac);
+	       net_eq(sock_net(sk), net) &&
+	       llc->laddr.lsap == laddr->lsap &&
+	       ether_addr_equal(llc->laddr.mac, laddr->mac);
 }

 /**
 * llc_lookup_dgram - Finds dgram socket for the local sap/mac
  * @sap: SAP
  * @laddr: address of local LLC (MAC + SAP)
+ * @net: netns to look up a socket in
  *
  * Search socket list of the SAP and finds connection using the local
  * mac, and local sap. Returns pointer for socket found, %NULL otherwise.
  */
 static struct sock *llc_lookup_dgram(struct llc_sap *sap,
-				     const struct llc_addr *laddr)
+				     const struct llc_addr *laddr,
+				     const struct net *net)
 {
 	struct sock *rc;
 	struct hlist_nulls_node *node;
@@ -322,12 +326,12 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
 	rcu_read_lock_bh();
again:
 	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
-		if (llc_dgram_match(sap, laddr, rc)) {
+		if (llc_dgram_match(sap, laddr, rc, net)) {
 			/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
 			if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
 				goto again;
 			if (unlikely(llc_sk(rc)->sap != sap ||
-				     !llc_dgram_match(sap, laddr, rc))) {
+				     !llc_dgram_match(sap, laddr, rc, net))) {
 				sock_put(rc);
 				continue;
 			}
@@ -429,7 +433,7 @@ void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb)
 		llc_sap_mcast(sap, &laddr, skb);
 		kfree_skb(skb);
 	} else {
-		struct sock *sk = llc_lookup_dgram(sap, &laddr);
+		struct sock *sk = llc_lookup_dgram(sap, &laddr, dev_net(skb->dev));

 		if (sk) {
 			llc_sap_rcv(sap, skb, sk);
 			sock_put(sk);
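All of the LLC hunks repeat one idea: every socket lookup now carries the relevant network namespace (the receiving device's netns on the packet path, the socket's own netns on the control path), and each match helper rejects sockets from other namespaces via net_eq(). That is what allows the init_net-only gate in llc_rcv() to be removed. A toy C sketch of namespace-scoped matching, with invented types and names:

#include <stdbool.h>
#include <stdio.h>

struct net { int id; };			/* stand-in for struct net */

struct sock {
	const struct net *net;		/* namespace the socket lives in */
	unsigned char lsap;
};

static bool net_eq(const struct net *a, const struct net *b)
{
	return a == b;			/* identity compare, as in the kernel */
}

/* A match helper only accepts sockets from the requested namespace. */
static bool sap_match(const struct sock *sk, unsigned char lsap,
		      const struct net *net)
{
	return net_eq(sk->net, net) && sk->lsap == lsap;
}

int main(void)
{
	struct net ns1 = { 1 }, ns2 = { 2 };
	struct sock sk = { .net = &ns1, .lsap = 0x42 };

	printf("same ns: %d\n", sap_match(&sk, 0x42, &ns1));	/* 1 */
	printf("other ns: %d\n", sap_match(&sk, 0x42, &ns2));	/* 0 */
	return 0;
}

Because the namespace is part of the match predicate itself, it is re-checked inside the SLAB_TYPESAFE_BY_RCU revalidation step too, so a socket that was recycled into another namespace between the first match and the refcount grab is caught and skipped.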
@@ -3685,8 +3685,6 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
 			if (err < 0)
 				return err;
 		}
-
-		cond_resched();
 	}

 	return 0;
@@ -3710,6 +3708,8 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
 		err = nft_chain_validate(&ctx, chain);
 		if (err < 0)
 			return err;
+
+		cond_resched();
 	}

 	return 0;
@@ -4087,6 +4087,8 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info,
 		list_for_each_entry(chain, &table->chains, list) {
 			if (!nft_is_active_next(net, chain))
 				continue;
+			if (nft_chain_is_bound(chain))
+				continue;

 			ctx.chain = chain;
 			err = nft_delrule_by_chain(&ctx);
@@ -10517,6 +10519,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 	if (!tb[NFTA_VERDICT_CODE])
 		return -EINVAL;

+	/* zero padding hole for memcmp */
+	memset(data, 0, sizeof(*data));
+
 	data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));

 	switch (data->verdict.code) {
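The memset() matters because nft_data values are later compared with memcmp(), and C makes no promise about the bytes in a struct's padding holes: two structs whose named fields are all equal can still memcmp() as different. A small userspace demonstration of the hazard (the struct layout is illustrative; any type with internal padding behaves this way on typical ABIs):

#include <stdio.h>
#include <string.h>

struct verdict {
	char chain;	/* 3 padding bytes typically follow on LP64 */
	int code;
};

static void make(struct verdict *v, char chain, int code, int poison)
{
	memset(v, poison, sizeof(*v));	/* simulate stale stack bytes */
	v->chain = chain;
	v->code = code;
}

int main(void)
{
	struct verdict a, b;

	make(&a, 'x', 1, 0x00);
	make(&b, 'x', 1, 0xff);	/* same fields, different padding */

	/* Without zeroing the whole struct first, memcmp() can report a
	 * mismatch even though every named field is equal.
	 */
	printf("equal by memcmp: %d\n", memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}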
@@ -10799,6 +10804,9 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
 	ctx.family = table->family;
 	ctx.table = table;
 	list_for_each_entry(chain, &table->chains, list) {
+		if (nft_chain_is_bound(chain))
+			continue;
+
 		ctx.chain = chain;
 		list_for_each_entry_safe(rule, nr, &chain->rules, list) {
 			list_del(&rule->list);
@@ -1929,7 +1929,11 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
 	int i, start, rules_fx;

 	match_start = data;
-	match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
+
+	if (nft_set_ext_exists(&e->ext, NFT_SET_EXT_KEY_END))
+		match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
+	else
+		match_end = data;

 	start = first_rule;
 	rules_fx = rules_f0;
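The pipapo fix is a plain presence check: a set element only carries the KEY_END extension when it was inserted as a range, so dereferencing it unconditionally reads garbage for single-point elements, which is how the improper removal (and the use-after-free mentioned in the cover letter) happened. A generic C sketch of guarding an optional field, with invented types:

#include <stdbool.h>
#include <stdio.h>

struct elem {
	unsigned char key[4];
	bool has_key_end;		/* extension present only for ranges */
	unsigned char key_end[4];
};

static const unsigned char *match_end_of(const struct elem *e)
{
	/* Fall back to the start key when no end key was ever set,
	 * mirroring the nft_set_ext_exists() check above.
	 */
	return e->has_key_end ? e->key_end : e->key;
}

int main(void)
{
	struct elem point = { .key = {1, 2, 3, 4}, .has_key_end = false };
	struct elem range = { .key = {1, 0, 0, 0}, .has_key_end = true,
			      .key_end = {2, 0, 0, 0} };

	printf("point end[0]: %u\n", match_end_of(&point)[0]);	/* 1 */
	printf("range end[0]: %u\n", match_end_of(&range)[0]);	/* 2 */
	return 0;
}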
@@ -406,56 +406,6 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
 	return 0;
 }

-static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
-			     struct cls_bpf_prog *prog, unsigned long base,
-			     struct nlattr **tb, struct nlattr *est, u32 flags,
-			     struct netlink_ext_ack *extack)
-{
-	bool is_bpf, is_ebpf, have_exts = false;
-	u32 gen_flags = 0;
-	int ret;
-
-	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
-	is_ebpf = tb[TCA_BPF_FD];
-	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
-		return -EINVAL;
-
-	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, flags,
-				extack);
-	if (ret < 0)
-		return ret;
-
-	if (tb[TCA_BPF_FLAGS]) {
-		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
-
-		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
-			return -EINVAL;
-
-		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
-	}
-	if (tb[TCA_BPF_FLAGS_GEN]) {
-		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
-		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
-		    !tc_flags_valid(gen_flags))
-			return -EINVAL;
-	}
-
-	prog->exts_integrated = have_exts;
-	prog->gen_flags = gen_flags;
-
-	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
-		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
-	if (ret < 0)
-		return ret;
-
-	if (tb[TCA_BPF_CLASSID]) {
-		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
-		tcf_bind_filter(tp, &prog->res, base);
-	}
-
-	return 0;
-}
-
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 			  struct tcf_proto *tp, unsigned long base,
 			  u32 handle, struct nlattr **tca,
@@ -463,9 +413,12 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 			  struct netlink_ext_ack *extack)
 {
 	struct cls_bpf_head *head = rtnl_dereference(tp->root);
+	bool is_bpf, is_ebpf, have_exts = false;
 	struct cls_bpf_prog *oldprog = *arg;
 	struct nlattr *tb[TCA_BPF_MAX + 1];
+	bool bound_to_filter = false;
 	struct cls_bpf_prog *prog;
+	u32 gen_flags = 0;
 	int ret;

 	if (tca[TCA_OPTIONS] == NULL)
@@ -504,11 +457,51 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		goto errout;
 	prog->handle = handle;

-	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], flags,
-				extack);
+	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
+	is_ebpf = tb[TCA_BPF_FD];
+	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
+		ret = -EINVAL;
+		goto errout_idr;
+	}
+
+	ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
+				flags, extack);
 	if (ret < 0)
 		goto errout_idr;

+	if (tb[TCA_BPF_FLAGS]) {
+		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
+
+		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
+			ret = -EINVAL;
+			goto errout_idr;
+		}
+
+		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
+	}
+	if (tb[TCA_BPF_FLAGS_GEN]) {
+		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
+		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
+		    !tc_flags_valid(gen_flags)) {
+			ret = -EINVAL;
+			goto errout_idr;
+		}
+	}
+
+	prog->exts_integrated = have_exts;
+	prog->gen_flags = gen_flags;
+
+	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
+		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
+	if (ret < 0)
+		goto errout_idr;
+
+	if (tb[TCA_BPF_CLASSID]) {
+		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+		tcf_bind_filter(tp, &prog->res, base);
+		bound_to_filter = true;
+	}
+
 	ret = cls_bpf_offload(tp, prog, oldprog, extack);
 	if (ret)
 		goto errout_parms;
@@ -530,6 +523,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 	return 0;

 errout_parms:
+	if (bound_to_filter)
+		tcf_unbind_filter(tp, &prog->res);
 	cls_bpf_free_parms(prog);
 errout_idr:
 	if (!oldprog)
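This and the three classifier fixes that follow ("fix undoing tcf_bind_filter() in multiple classifiers") all share one shape: tcf_bind_filter() is a side effect taken partway through the change operation, and it must be undone if any later step fails, so the rewritten code remembers whether the bind happened and unbinds on the error path. A stripped-down C sketch of that unwind discipline, with all names invented:

#include <stdio.h>

static int bound;	/* stands in for the class reference taken by bind */

static void bind_filter(void)   { bound = 1; }
static void unbind_filter(void) { bound = 0; }
static int offload(int fail)    { return fail ? -1 : 0; }

static int change(int classid_given, int offload_fails)
{
	int bound_to_filter = 0;
	int err;

	if (classid_given) {
		bind_filter();
		bound_to_filter = 1;
	}

	err = offload(offload_fails);
	if (err)
		goto errout;

	return 0;

errout:
	if (bound_to_filter)	/* undo the earlier side effect */
		unbind_filter();
	return err;
}

int main(void)
{
	change(1, 1);
	printf("after failed change, bound=%d\n", bound);	/* 0: undone */
	return 0;
}

Inlining the old *_set_parms() helpers into the change functions is what makes this tractable: once the bind sits in the same function as the later failure points, a single flag plus one unwind label covers every path.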
@@ -2173,53 +2173,6 @@ static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
 	return mask->meta.l2_miss;
 }

-static int fl_set_parms(struct net *net, struct tcf_proto *tp,
-			struct cls_fl_filter *f, struct fl_flow_mask *mask,
-			unsigned long base, struct nlattr **tb,
-			struct nlattr *est,
-			struct fl_flow_tmplt *tmplt,
-			u32 flags, u32 fl_flags,
-			struct netlink_ext_ack *extack)
-{
-	int err;
-
-	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
-				   fl_flags, extack);
-	if (err < 0)
-		return err;
-
-	if (tb[TCA_FLOWER_CLASSID]) {
-		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
-		if (flags & TCA_ACT_FLAGS_NO_RTNL)
-			rtnl_lock();
-		tcf_bind_filter(tp, &f->res, base);
-		if (flags & TCA_ACT_FLAGS_NO_RTNL)
-			rtnl_unlock();
-	}
-
-	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
-	if (err)
-		return err;
-
-	fl_mask_update_range(mask);
-	fl_set_masked_key(&f->mkey, &f->key, mask);
-
-	if (!fl_mask_fits_tmplt(tmplt, mask)) {
-		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
-		return -EINVAL;
-	}
-
-	/* Enable tc skb extension if filter matches on data extracted from
-	 * this extension.
-	 */
-	if (fl_needs_tc_skb_ext(&mask->key)) {
-		f->needs_tc_skb_ext = 1;
-		tc_skb_ext_tc_enable();
-	}
-
-	return 0;
-}
-
 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
 			       struct cls_fl_filter *fold,
 			       bool *in_ht)
@@ -2251,6 +2204,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	struct cls_fl_head *head = fl_head_dereference(tp);
 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
 	struct cls_fl_filter *fold = *arg;
+	bool bound_to_filter = false;
 	struct cls_fl_filter *fnew;
 	struct fl_flow_mask *mask;
 	struct nlattr **tb;
@@ -2335,15 +2289,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (err < 0)
 		goto errout_idr;

-	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
-			   tp->chain->tmplt_priv, flags, fnew->flags,
-			   extack);
-	if (err)
+	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
+				   &fnew->exts, flags, fnew->flags,
+				   extack);
+	if (err < 0)
 		goto errout_idr;

+	if (tb[TCA_FLOWER_CLASSID]) {
+		fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
+		if (flags & TCA_ACT_FLAGS_NO_RTNL)
+			rtnl_lock();
+		tcf_bind_filter(tp, &fnew->res, base);
+		if (flags & TCA_ACT_FLAGS_NO_RTNL)
+			rtnl_unlock();
+		bound_to_filter = true;
+	}
+
+	err = fl_set_key(net, tb, &fnew->key, &mask->key, extack);
+	if (err)
+		goto unbind_filter;
+
+	fl_mask_update_range(mask);
+	fl_set_masked_key(&fnew->mkey, &fnew->key, mask);
+
+	if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
+		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
+		err = -EINVAL;
+		goto unbind_filter;
+	}
+
+	/* Enable tc skb extension if filter matches on data extracted from
+	 * this extension.
+	 */
+	if (fl_needs_tc_skb_ext(&mask->key)) {
+		fnew->needs_tc_skb_ext = 1;
+		tc_skb_ext_tc_enable();
+	}
+
 	err = fl_check_assign_mask(head, fnew, fold, mask);
 	if (err)
-		goto errout_idr;
+		goto unbind_filter;

 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
 	if (err)
@@ -2434,6 +2419,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 					       fnew->mask->filter_ht_params);
 errout_mask:
 	fl_mask_put(head, fnew->mask);
+
+unbind_filter:
+	if (bound_to_filter) {
+		if (flags & TCA_ACT_FLAGS_NO_RTNL)
+			rtnl_lock();
+		tcf_unbind_filter(tp, &fnew->res);
+		if (flags & TCA_ACT_FLAGS_NO_RTNL)
+			rtnl_unlock();
+	}
+
 errout_idr:
 	if (!fold)
 		idr_remove(&head->handle_idr, fnew->handle);
@@ -159,26 +159,6 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 	[TCA_MATCHALL_FLAGS] = { .type = NLA_U32 },
 };

-static int mall_set_parms(struct net *net, struct tcf_proto *tp,
-			  struct cls_mall_head *head,
-			  unsigned long base, struct nlattr **tb,
-			  struct nlattr *est, u32 flags, u32 fl_flags,
-			  struct netlink_ext_ack *extack)
-{
-	int err;
-
-	err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags,
-				   fl_flags, extack);
-	if (err < 0)
-		return err;
-
-	if (tb[TCA_MATCHALL_CLASSID]) {
-		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
-		tcf_bind_filter(tp, &head->res, base);
-	}
-	return 0;
-}
-
 static int mall_change(struct net *net, struct sk_buff *in_skb,
 		       struct tcf_proto *tp, unsigned long base,
 		       u32 handle, struct nlattr **tca,
@@ -187,6 +167,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 {
 	struct cls_mall_head *head = rtnl_dereference(tp->root);
 	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
+	bool bound_to_filter = false;
 	struct cls_mall_head *new;
 	u32 userflags = 0;
 	int err;
@@ -226,11 +207,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 		goto err_alloc_percpu;
 	}

-	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE],
-			     flags, new->flags, extack);
-	if (err)
+	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
+				   &new->exts, flags, new->flags, extack);
+	if (err < 0)
 		goto err_set_parms;

+	if (tb[TCA_MATCHALL_CLASSID]) {
+		new->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+		tcf_bind_filter(tp, &new->res, base);
+		bound_to_filter = true;
+	}
+
 	if (!tc_skip_hw(new->flags)) {
 		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
 					     extack);
@@ -246,6 +233,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 	return 0;

 err_replace_hw_filter:
+	if (bound_to_filter)
+		tcf_unbind_filter(tp, &new->res);
 err_set_parms:
 	free_percpu(new->pf);
 err_alloc_percpu:
@@ -712,8 +712,23 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
 	[TCA_U32_FLAGS] = { .type = NLA_U32 },
 };

+static void u32_unbind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
+			      struct nlattr **tb)
+{
+	if (tb[TCA_U32_CLASSID])
+		tcf_unbind_filter(tp, &n->res);
+}
+
+static void u32_bind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
+			    unsigned long base, struct nlattr **tb)
+{
+	if (tb[TCA_U32_CLASSID]) {
+		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
+		tcf_bind_filter(tp, &n->res, base);
+	}
+}
+
 static int u32_set_parms(struct net *net, struct tcf_proto *tp,
-			 unsigned long base,
 			 struct tc_u_knode *n, struct nlattr **tb,
 			 struct nlattr *est, u32 flags, u32 fl_flags,
 			 struct netlink_ext_ack *extack)
@@ -760,10 +775,6 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 		if (ht_old)
 			ht_old->refcnt--;
 	}
-	if (tb[TCA_U32_CLASSID]) {
-		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
-		tcf_bind_filter(tp, &n->res, base);
-	}

 	if (ifindex >= 0)
 		n->ifindex = ifindex;
@@ -903,17 +914,27 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		if (!new)
 			return -ENOMEM;

-		err = u32_set_parms(net, tp, base, new, tb,
-				    tca[TCA_RATE], flags, new->flags,
-				    extack);
+		err = u32_set_parms(net, tp, new, tb, tca[TCA_RATE],
+				    flags, new->flags, extack);

 		if (err) {
 			__u32_destroy_key(new);
 			return err;
 		}

+		u32_bind_filter(tp, new, base, tb);
+
 		err = u32_replace_hw_knode(tp, new, flags, extack);
 		if (err) {
+			u32_unbind_filter(tp, new, tb);
+
+			if (tb[TCA_U32_LINK]) {
+				struct tc_u_hnode *ht_old;
+
+				ht_old = rtnl_dereference(n->ht_down);
+				if (ht_old)
+					ht_old->refcnt++;
+			}
 			__u32_destroy_key(new);
 			return err;
 		}
@@ -1074,15 +1095,18 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 	}
 #endif

-	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE],
+	err = u32_set_parms(net, tp, n, tb, tca[TCA_RATE],
 			    flags, n->flags, extack);
+
+	u32_bind_filter(tp, n, base, tb);
+
 	if (err == 0) {
 		struct tc_u_knode __rcu **ins;
 		struct tc_u_knode *pins;

 		err = u32_replace_hw_knode(tp, n, flags, extack);
 		if (err)
-			goto errhw;
+			goto errunbind;

 		if (!tc_in_hw(n->flags))
 			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
@@ -1100,7 +1124,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		return 0;
 	}

-errhw:
+errunbind:
+	u32_unbind_filter(tp, n, tb);
+
 #ifdef CONFIG_CLS_U32_MARK
 	free_percpu(n->pcpu_success);
 #endif
@@ -22,9 +22,16 @@ static int timer_cb(void *map, int *key, struct bpf_timer *timer)
 	return buf[69];
 }

+__attribute__((noinline))
+static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+	volatile char buf[300] = {};
+	return buf[255] + timer_cb(NULL, NULL, NULL);
+}
+
 SEC("tc")
-__failure __msg("combined stack size of 2 calls")
-int prog(struct __sk_buff *ctx)
+__failure __msg("combined stack size of 2 calls is 576. Too large")
+int pseudo_call_check(struct __sk_buff *ctx)
 {
 	struct hmap_elem *elem;
 	volatile char buf[256] = {};
@@ -37,4 +44,18 @@ int prog(struct __sk_buff *ctx)
 	return bpf_timer_set_callback(&elem->timer, timer_cb) + buf[0];
 }

+SEC("tc")
+__failure __msg("combined stack size of 2 calls is 608. Too large")
+int async_call_root_check(struct __sk_buff *ctx)
+{
+	struct hmap_elem *elem;
+	volatile char buf[256] = {};
+
+	elem = bpf_map_lookup_elem(&hmap, &(int){0});
+	if (!elem)
+		return 0;
+
+	return bpf_timer_set_callback(&elem->timer, bad_timer_cb) + buf[0];
+}
+
 char _license[] SEC("license") = "GPL";
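The expected messages encode the verifier's stack accounting, which rounds each frame to a multiple of 32 bytes before summing call chains against the 512-byte limit. A back-of-envelope check of the first message (sketch of the arithmetic only; the verifier's exact bookkeeping differs in detail and lives in its stack-depth check):

#include <stdio.h>

static unsigned int round_up32(unsigned int n)
{
	return (n + 31) & ~31u;		/* round up to a 32-byte multiple */
}

int main(void)
{
	unsigned int caller = round_up32(256);	/* volatile char buf[256] */
	unsigned int callee = round_up32(300);	/* volatile char buf[300] */

	/* 256 + 320 = 576, just over the 512-byte BPF stack limit */
	printf("combined stack size of 2 calls is %u\n", caller + callee);
	return 0;
}

The new async_call_root_check test exists because timer callbacks are entered from the hidden async-callback root rather than from the main program, so their chains have to be charged separately; bad_timer_cb() deliberately deepens that chain to prove the verifier now rejects it.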
@@ -5,6 +5,8 @@ CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_MARK=y
 CONFIG_NF_CONNTRACK_ZONES=y
 CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_FLOW_TABLE=m
 CONFIG_NF_NAT=m
 CONFIG_NETFILTER_XT_TARGET_LOG=m
new file: tools/testing/selftests/tc-testing/settings (1 line)
@@ -0,0 +1 @@
+timeout=900