Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 17:22:07 +00:00)
Merge tag 'net-6.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from bluetooth and netfilter.

  Slim pickings this time, probably a combination of summer, DevConf.cz,
  and the end of first half of the year at corporations.

  Current release - regressions:

   - Revert "igc: fix a log entry using uninitialized netdev", it traded
     lack of netdev name in a printk() for a crash

  Previous releases - regressions:

   - Bluetooth: L2CAP: fix rejecting L2CAP_CONN_PARAM_UPDATE_REQ

   - geneve: fix incorrectly setting lengths of inner headers in the skb,
     confusing the drivers and causing mangled packets

   - sched: initialize noop_qdisc owner to avoid false-positive recursion
     detection (recursing on CPU 0), which bubbles up to user space as a
     sendmsg() error, while noop_qdisc should silently drop

   - netdevsim: fix backwards compatibility in nsim_get_iflink()

  Previous releases - always broken:

   - netfilter: ipset: fix race between namespace cleanup and gc in the
     list:set type"

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-6.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (35 commits)
  bnxt_en: Adjust logging of firmware messages in case of released token in __hwrm_send()
  af_unix: Read with MSG_PEEK loops if the first unread byte is OOB
  bnxt_en: Cap the size of HWRM_PORT_PHY_QCFG forwarded response
  gve: Clear napi->skb before dev_kfree_skb_any()
  ionic: fix use after netif_napi_del()
  Revert "igc: fix a log entry using uninitialized netdev"
  net: bridge: mst: fix suspicious rcu usage in br_mst_set_state
  net: bridge: mst: pass vlan group directly to br_mst_vlan_set_state
  net/ipv6: Fix the RT cache flush via sysctl using a previous delay
  net: stmmac: replace priv->speed with the portTransmitRate from the tc-cbs parameters
  gve: ignore nonrelevant GSO type bits when processing TSO headers
  net: pse-pd: Use EOPNOTSUPP error code instead of ENOTSUPP
  netfilter: Use flowlabel flow key when re-routing mangled packets
  netfilter: ipset: Fix race between namespace cleanup and gc in the list:set type
  netfilter: nft_inner: validate mandatory meta and payload
  tcp: use signed arithmetic in tcp_rtx_probe0_timed_out()
  mailmap: map Geliang's new email address
  mptcp: pm: update add_addr counters after connect
  mptcp: pm: inc RmAddr MIB counter once per RM_ADDR ID
  mptcp: ensure snd_una is properly initialized on connect
  ...
This commit is contained in:
commit d20f6b3d74
.mailmap:
@@ -219,6 +219,7 @@ Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
 Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
 Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
 Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
+Geliang Tang <geliang@kernel.org> <tanggeliang@kylinos.cn>
 Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
 		init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
 						 priv->internal_mdio_bus->id,
 						 port_num);
-		if (!init_data.devicename)
+		if (!init_data.devicename) {
+			fwnode_handle_put(led);
+			fwnode_handle_put(leds);
 			return -ENOMEM;
+		}
 
 		ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
 		if (ret)
@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
 		kfree(init_data.devicename);
 	}
 
+	fwnode_handle_put(leds);
 	return 0;
 }
 
@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
 		 * the correct port for LED setup.
 		 */
 		ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
-		if (ret)
+		if (ret) {
+			fwnode_handle_put(port);
+			fwnode_handle_put(ports);
 			return ret;
+		}
 	}
 
+	fwnode_handle_put(ports);
 	return 0;
 }
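Note on the pattern above: both qca8k hunks apply the usual fwnode reference-counting rule — every handle returned by a child-node lookup carries a reference that must be dropped on every exit path, success included. A minimal, hedged sketch of that rule; setup_one_led() is a hypothetical helper, not the driver's real code:

static int setup_leds(struct fwnode_handle *parent)
{
	struct fwnode_handle *leds, *led;

	leds = fwnode_get_named_child_node(parent, "leds"); /* takes a ref */
	if (!leds)
		return 0;

	fwnode_for_each_child_node(leds, led) { /* iterator holds a ref on 'led' */
		int err = setup_one_led(led); /* hypothetical helper */

		if (err) {
			fwnode_handle_put(led);  /* drop the loop's child ref */
			fwnode_handle_put(leds); /* and the parent ref */
			return err;
		}
	}

	fwnode_handle_put(leds); /* the success path must put it too */
	return 0;
}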
@@ -1434,6 +1434,57 @@ struct bnxt_l2_filter {
 	atomic_t refcnt;
 };
 
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 link;
+	u8 active_fec_signal_mode;
+	__le16 link_speed;
+	u8 duplex_cfg;
+	u8 pause;
+	__le16 support_speeds;
+	__le16 force_link_speed;
+	u8 auto_mode;
+	u8 auto_pause;
+	__le16 auto_link_speed;
+	__le16 auto_link_speed_mask;
+	u8 wirespeed;
+	u8 lpbk;
+	u8 force_pause;
+	u8 module_status;
+	__le32 preemphasis;
+	u8 phy_maj;
+	u8 phy_min;
+	u8 phy_bld;
+	u8 phy_type;
+	u8 media_type;
+	u8 xcvr_pkg_type;
+	u8 eee_config_phy_addr;
+	u8 parallel_detect;
+	__le16 link_partner_adv_speeds;
+	u8 link_partner_adv_auto_mode;
+	u8 link_partner_adv_pause;
+	__le16 adv_eee_link_speed_mask;
+	__le16 link_partner_adv_eee_link_speed_mask;
+	__le32 xcvr_identifier_type_tx_lpi_timer;
+	__le16 fec_cfg;
+	u8 duplex_state;
+	u8 option_flags;
+	char phy_vendor_name[16];
+	char phy_vendor_partnumber[16];
+	__le16 support_pam4_speeds;
+	__le16 force_pam4_link_speed;
+	__le16 auto_pam4_link_speed_mask;
+	u8 link_partner_pam4_adv_speeds;
+	u8 valid;
+};
+
 struct bnxt_link_info {
 	u8 phy_type;
 	u8 media_type;
@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
 			   req_type);
 	else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
 		hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
-			 req_type, token->seq_id, rc);
+			 req_type, le16_to_cpu(ctx->req->seq_id), rc);
 	rc = __hwrm_to_stderr(rc);
 exit:
 	if (token)
@@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 	struct hwrm_fwd_resp_input *req;
 	int rc;
 
-	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+	if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+		netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+				 msg_size);
 		return -EINVAL;
+	}
 
 	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
 	if (!rc) {
@@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
 		rc = bnxt_hwrm_exec_fwd_resp(
 			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
 	} else {
-		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+		struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
 		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
 
 		phy_qcfg_req =
@@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
 		mutex_unlock(&bp->link_lock);
 		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
 		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+		/* New SPEEDS2 fields are beyond the legacy structure, so
+		 * clear the SPEEDS2_SUPPORTED flag.
+		 */
+		phy_qcfg_resp.option_flags &=
+			~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
 		phy_qcfg_resp.valid = 1;
 
 		if (vf->flags & BNXT_VF_LINK_UP) {
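Note: the compat structure and the resp_len/option_flags overrides above implement a cap-before-forward rule — the PF never hands a VF more bytes, or newer capability flags, than the legacy layout allows. A generic, hedged sketch of the capping idea (buffer and helper names invented, not the driver's real code):

/* Forward a response without overrunning a VF that only knows the
 * 96-byte legacy layout. */
static void fwd_capped_resp(void *vf_buf, const void *resp, u16 resp_len)
{
	size_t cap = sizeof(struct hwrm_port_phy_qcfg_output_compat);

	memcpy(vf_buf, resp, min_t(size_t, resp_len, cap));
}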
@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
 				pg_info->page_offset;
 		memcpy(skb->data, va, MIN_SKB_SIZE);
 		skb_put(skb, MIN_SKB_SIZE);
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				pg_info->page,
+				pg_info->page_offset + MIN_SKB_SIZE,
+				len - MIN_SKB_SIZE,
+				LIO_RXBUFFER_SZ);
 	}
-
-	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-			pg_info->page,
-			pg_info->page_offset + MIN_SKB_SIZE,
-			len - MIN_SKB_SIZE,
-			LIO_RXBUFFER_SZ);
 	} else {
 		struct octeon_skb_page_info *pg_info =
 			((struct octeon_skb_page_info *)(skb->cb));
@@ -647,11 +647,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
 	skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
 }
 
-static void gve_rx_free_skb(struct gve_rx_ring *rx)
+static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
 {
 	if (!rx->ctx.skb_head)
 		return;
 
+	if (rx->ctx.skb_head == napi->skb)
+		napi->skb = NULL;
 	dev_kfree_skb_any(rx->ctx.skb_head);
 	rx->ctx.skb_head = NULL;
 	rx->ctx.skb_tail = NULL;
@@ -950,7 +952,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 
 		err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
 		if (err < 0) {
-			gve_rx_free_skb(rx);
+			gve_rx_free_skb(napi, rx);
 			u64_stats_update_begin(&rx->statss);
 			if (err == -ENOMEM)
 				rx->rx_skb_alloc_fail++;
@@ -993,7 +995,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 
 	/* gve_rx_complete_skb() will consume skb if successful */
 	if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
-		gve_rx_free_skb(rx);
+		gve_rx_free_skb(napi, rx);
 		u64_stats_update_begin(&rx->statss);
 		rx->rx_desc_err_dropped_pkt++;
 		u64_stats_update_end(&rx->statss);
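Note: the gve fix addresses a general napi_get_frags() hazard — napi->skb caches a partially built skb between polls, so freeing that skb without clearing the cache leaves a dangling pointer that the next napi_get_frags() call hands back. A hedged sketch of the invariant (helper name invented):

static void free_pending_rx_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (napi->skb == skb)
		napi->skb = NULL; /* invalidate the cache before freeing */
	dev_kfree_skb_any(skb);
}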
@@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb)
 	if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
 		return -1;
 
+	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+		return -EINVAL;
+
 	/* Needed because we will modify header. */
 	err = skb_cow_head(skb, 0);
 	if (err < 0)
 		return err;
 
 	tcp = tcp_hdr(skb);
 
 	/* Remove payload length from checksum. */
 	paylen = skb->len - skb_transport_offset(skb);
-
-	switch (skb_shinfo(skb)->gso_type) {
-	case SKB_GSO_TCPV4:
-	case SKB_GSO_TCPV6:
-		csum_replace_by_diff(&tcp->check,
-				     (__force __wsum)htonl(paylen));
-
-		/* Compute length of segmentation header. */
-		header_len = skb_tcp_all_headers(skb);
-		break;
-	default:
-		return -EINVAL;
-	}
+	csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+	header_len = skb_tcp_all_headers(skb);
 
 	if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
 		return -EINVAL;
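Note: the rework above leans on gso_type being a bit mask, not an enum. A switch on the full value misfires as soon as an unrelated bit such as SKB_GSO_DODGY is also set, while a mask test accepts any combination that includes TCP:

/* SKB_GSO_TCPV4 | SKB_GSO_DODGY hits a switch's default branch,
 * but passes the mask test used by the fix: */
if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
	return -EINVAL; /* not TCP segmentation at all */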
@@ -3535,6 +3535,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
 		ret = hns3_alloc_and_attach_buffer(ring, i);
 		if (ret)
 			goto out_buffer_fail;
+
+		if (!(i % HNS3_RESCHED_BD_NUM))
+			cond_resched();
 	}
 
 	return 0;
@@ -5107,6 +5110,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
 		}
 
 		u64_stats_init(&priv->ring[i].syncp);
+		cond_resched();
 	}
 
 	return 0;
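Note: both hns3 hunks follow the same rule of thumb — a ring-initialization loop that may run for hundreds of thousands of iterations in process context should yield periodically, or non-preemptible kernels risk soft lockups. A generic sketch (alloc_one_buffer() is a stand-in; the 1024 batch mirrors HNS3_RESCHED_BD_NUM):

for (i = 0; i < ring->desc_num; i++) {
	ret = alloc_one_buffer(ring, i); /* hypothetical helper */
	if (ret)
		goto out_buffer_fail;

	if (!(i % 1024)) /* yield every 1024 descriptors */
		cond_resched();
}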
@@ -214,6 +214,8 @@ enum hns3_nic_state {
 #define HNS3_CQ_MODE_EQE	1U
 #define HNS3_CQ_MODE_CQE	0U
 
+#define HNS3_RESCHED_BD_NUM	1024
+
 enum hns3_pkt_l2t_type {
 	HNS3_L2_TYPE_UNICAST,
 	HNS3_L2_TYPE_MULTICAST,
@@ -3086,9 +3086,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
 
 static void hclge_update_link_status(struct hclge_dev *hdev)
 {
-	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
 	struct hnae3_handle *handle = &hdev->vport[0].nic;
-	struct hnae3_client *rclient = hdev->roce_client;
 	struct hnae3_client *client = hdev->nic_client;
 	int state;
 	int ret;
@@ -3112,8 +3110,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
 
 		client->ops->link_status_change(handle, state);
 		hclge_config_mac_tnl_int(hdev, state);
-		if (rclient && rclient->ops->link_status_change)
-			rclient->ops->link_status_change(rhandle, state);
+
+		if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
+			struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+			struct hnae3_client *rclient = hdev->roce_client;
+
+			if (rclient && rclient->ops->link_status_change)
+				rclient->ops->link_status_change(rhandle,
+								 state);
+		}
 
 		hclge_push_link_status(hdev);
 	}
@@ -11319,6 +11324,12 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 	return ret;
 }
 
+static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
+{
+	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+	       test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
 static void hclge_uninit_client_instance(struct hnae3_client *client,
 					 struct hnae3_ae_dev *ae_dev)
 {
@@ -11327,7 +11338,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
 
 	if (hdev->roce_client) {
 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
-		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+		while (hclge_uninit_need_wait(hdev))
 			msleep(HCLGE_WAIT_RESET_DONE);
 
 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
@@ -7032,6 +7032,8 @@ static int igc_probe(struct pci_dev *pdev,
 	device_set_wakeup_enable(&adapter->pdev->dev,
 				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
 
+	igc_ptp_init(adapter);
+
 	igc_tsn_clear_schedule(adapter);
 
 	/* reset the hardware with the new settings */
@@ -7053,9 +7055,6 @@ static int igc_probe(struct pci_dev *pdev,
 	/* Check if Media Autosense is enabled */
 	adapter->ei = *ei;
 
-	/* do hw tstamp init after resetting */
-	igc_ptp_init(adapter);
-
 	/* print pcie link status and MAC address */
 	pcie_print_link_status(pdev);
 	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
@@ -4875,7 +4875,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 
 		/* Verify if UDP port is being offloaded by HW */
 		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
-			return features;
+			return vxlan_features_check(skb, features);
 
 #if IS_ENABLED(CONFIG_GENEVE)
 		/* Support Geneve offload for default UDP port */
@@ -4901,7 +4901,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	features = vlan_features_check(skb, features);
-	features = vxlan_features_check(skb, features);
 
 	/* Validate if the tunneled packet is being offloaded by HW */
 	if (skb->encapsulation &&
@@ -304,10 +304,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
 	if (ret)
 		return ret;
 
-	if (qcq->napi.poll)
-		napi_enable(&qcq->napi);
-
 	if (qcq->flags & IONIC_QCQ_F_INTR) {
+		napi_enable(&qcq->napi);
 		irq_set_affinity_hint(qcq->intr.vector,
 				      &qcq->intr.affinity_mask);
 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
@@ -93,6 +93,7 @@ struct ethqos_emac_driver_data {
 	bool has_emac_ge_3;
 	const char *link_clk_name;
 	bool has_integrated_pcs;
+	u32 dma_addr_width;
 	struct dwmac4_addrs dwmac4_addrs;
 };
 
@@ -276,6 +277,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
 	.has_emac_ge_3 = true,
 	.link_clk_name = "phyaux",
 	.has_integrated_pcs = true,
+	.dma_addr_width = 36,
 	.dwmac4_addrs = {
 		.dma_chan = 0x00008100,
 		.dma_chan_offset = 0x1000,
@@ -845,6 +847,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
 		plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
 	if (data->has_integrated_pcs)
 		plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
+	if (data->dma_addr_width)
+		plat_dat->host_dma_width = data->dma_addr_width;
 
 	if (ethqos->serdes_phy) {
 		plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
 			struct tc_cbs_qopt_offload *qopt)
 {
 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
+	s64 port_transmit_rate_kbps;
 	u32 queue = qopt->queue;
-	u32 ptr, speed_div;
 	u32 mode_to_use;
 	u64 value;
+	u32 ptr;
 	int ret;
 
 	/* Queue 0 is not AVB capable */
@@ -355,30 +356,26 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
 	if (!priv->dma_cap.av)
 		return -EOPNOTSUPP;
 
+	port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+
 	/* Port Transmit Rate and Speed Divider */
-	switch (priv->speed) {
+	switch (div_s64(port_transmit_rate_kbps, 1000)) {
 	case SPEED_10000:
-		ptr = 32;
-		speed_div = 10000000;
-		break;
 	case SPEED_5000:
 		ptr = 32;
-		speed_div = 5000000;
 		break;
 	case SPEED_2500:
-		ptr = 8;
-		speed_div = 2500000;
-		break;
 	case SPEED_1000:
 		ptr = 8;
-		speed_div = 1000000;
 		break;
 	case SPEED_100:
 		ptr = 4;
-		speed_div = 100000;
 		break;
 	default:
-		return -EOPNOTSUPP;
+		netdev_err(priv->dev,
+			   "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+			   port_transmit_rate_kbps);
+		return -EINVAL;
 	}
 
 	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
@@ -398,10 +395,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
 	}
 
 	/* Final adjustments for HW */
-	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+	value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
 	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
 
-	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+	value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
 	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
 
 	value = qopt->hicredit * 1024ll * 8;
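Note: the replacement works because, in the 802.1Q credit-based shaper model, idleSlope - sendSlope equals the port transmit rate by definition, so the rate can be derived from the tc-cbs parameters themselves rather than from a possibly stale priv->speed. A worked example with assumed figures:

/* Assumed tc-cbs request on a 100 Mbit/s port:
 *	idleslope =  20000 kbit/s, sendslope = -80000 kbit/s
 *
 *	port_transmit_rate_kbps = 20000 - (-80000) = 100000
 *	div_s64(100000, 1000)   = 100 -> SPEED_100 -> ptr = 4
 *	idle_slope (HW units)   = 20000 * 1024 * 4 / 100000 = 819
 *
 * If idleslope - sendslope matches no known link speed, the request
 * is now rejected with -EINVAL instead of shaping at a wrong rate.
 */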
@@ -815,6 +815,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 			   struct geneve_dev *geneve,
 			   const struct ip_tunnel_info *info)
 {
+	bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
 	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
 	struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
 	const struct ip_tunnel_key *key = &info->key;
@@ -826,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	__be16 sport;
 	int err;
 
-	if (!skb_vlan_inet_prepare(skb))
+	if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
 		return -EINVAL;
 
 	if (!gs4)
@@ -908,7 +909,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
-			       geneve->cfg.inner_proto_inherit);
+			       inner_proto_inherit);
 	if (unlikely(err))
 		return err;
 
@@ -925,6 +926,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 			    struct geneve_dev *geneve,
 			    const struct ip_tunnel_info *info)
 {
+	bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
 	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
 	struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
 	const struct ip_tunnel_key *key = &info->key;
@@ -935,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	__be16 sport;
 	int err;
 
-	if (!skb_vlan_inet_prepare(skb))
+	if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
 		return -EINVAL;
 
 	if (!gs6)
@@ -997,7 +999,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		ttl = ttl ? : ip6_dst_hoplimit(dst);
 	}
 	err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
-			       geneve->cfg.inner_proto_inherit);
+			       inner_proto_inherit);
 	if (unlikely(err))
 		return err;
 
@@ -324,7 +324,8 @@ static int nsim_get_iflink(const struct net_device *dev)
 
 	rcu_read_lock();
 	peer = rcu_dereference(nsim->peer);
-	iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+	iflink = peer ? READ_ONCE(peer->netdev->ifindex) :
+			READ_ONCE(dev->ifindex);
 	rcu_read_unlock();
 
 	return iflink;
@@ -2429,8 +2429,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
 
 	/* Handle remove event globally, it resets this state machine */
 	if (event == SFP_E_REMOVE) {
-		if (sfp->sm_mod_state > SFP_MOD_PROBE)
-			sfp_sm_mod_remove(sfp);
+		sfp_sm_mod_remove(sfp);
 		sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
 		return;
 	}
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
 /*
  * Device Tree constants for the Texas Instruments DP83867 PHY
  *
  * Author: Dan Murphy <dmurphy@ti.com>
  *
- * Copyright: (C) 2015 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
  */
 
 #ifndef _DT_BINDINGS_TI_DP83867_H
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
 /*
  * Device Tree constants for the Texas Instruments DP83869 PHY
  *
  * Author: Dan Murphy <dmurphy@ti.com>
  *
- * Copyright: (C) 2019 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
 */
 
 #ifndef _DT_BINDINGS_TI_DP83869_H
@@ -167,14 +167,14 @@ static inline int pse_ethtool_get_status(struct pse_control *psec,
 					 struct netlink_ext_ack *extack,
 					 struct pse_control_status *status)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline int pse_ethtool_set_config(struct pse_control *psec,
 					 struct netlink_ext_ack *extack,
 					 const struct pse_control_config *config)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline bool pse_has_podl(struct pse_control *psec)
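Note: ENOTSUPP (524) is a kernel-internal value with no libc name, so leaking it to user space yields an unhelpful error string; EOPNOTSUPP (95 on Linux) is what ethtool and friends expect. A small user-space demo of the difference:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	printf("524:        %s\n", strerror(524));        /* "Unknown error 524" */
	printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP)); /* "Operation not supported" */
	return 0;
}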
@@ -2113,18 +2113,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
 {
 	u16 max_latency;
 
-	if (min > max || min < 6 || max > 3200)
+	if (min > max) {
+		BT_WARN("min %d > max %d", min, max);
 		return -EINVAL;
+	}
 
-	if (to_multiplier < 10 || to_multiplier > 3200)
+	if (min < 6) {
+		BT_WARN("min %d < 6", min);
 		return -EINVAL;
+	}
 
-	if (max >= to_multiplier * 8)
+	if (max > 3200) {
+		BT_WARN("max %d > 3200", max);
 		return -EINVAL;
+	}
+
+	if (to_multiplier < 10) {
+		BT_WARN("to_multiplier %d < 10", to_multiplier);
+		return -EINVAL;
+	}
+
+	if (to_multiplier > 3200) {
+		BT_WARN("to_multiplier %d > 3200", to_multiplier);
+		return -EINVAL;
+	}
+
+	if (max >= to_multiplier * 8) {
+		BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
+		return -EINVAL;
+	}
 
 	max_latency = (to_multiplier * 4 / max) - 1;
-	if (latency > 499 || latency > max_latency)
+	if (latency > 499) {
+		BT_WARN("latency %d > 499", latency);
 		return -EINVAL;
+	}
+
+	if (latency > max_latency) {
+		BT_WARN("latency %d > max_latency %d", latency, max_latency);
+		return -EINVAL;
+	}
 
 	return 0;
 }
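Note: the split checks keep the original constraints; the only non-obvious bound is the latency one. Connection intervals are in 1.25 ms units and the supervision timeout in 10 ms units, so max_latency = (to_multiplier * 4 / max) - 1 is the largest peripheral latency for which the connection can still be supervised. Worked example with assumed values:

/* Assumed request: max = 40 (40 * 1.25 ms = 50 ms interval),
 * to_multiplier = 400 (400 * 10 ms = 4 s supervision timeout).
 *
 *	max >= to_multiplier * 8?  40 >= 3200 -> no, params plausible
 *	max_latency = (400 * 4 / 40) - 1 = 39
 *
 * latency = 39 gives (1 + 39) * 50 ms * 2 = 4000 ms <= timeout;
 * latency = 40 could sleep past the supervision timeout -> -EINVAL.
 */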
@@ -461,9 +461,10 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
 
 /* Variant of pskb_inet_may_pull().
  */
-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
+					 bool inner_proto_inherit)
 {
-	int nhlen = 0, maclen = ETH_HLEN;
+	int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
 	__be16 type = skb->protocol;
 
 	/* Essentially this is skb_protocol(skb, true)
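Note: the new flag matters because with inner_proto_inherit the encapsulated payload starts directly at the network header — there is no inner Ethernet header. Unconditionally treating the first ETH_HLEN bytes as a MAC header, as before, mangles the start of the IP header; that is the geneve breakage from the merge summary:

/* maclen selection, as in skb_vlan_inet_prepare() above:
 *	normal tunnel mode:  [eth][ip][payload] -> maclen = ETH_HLEN (14)
 *	inner_proto_inherit:      [ip][payload] -> maclen = 0
 */
int maclen = inner_proto_inherit ? 0 : ETH_HLEN;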
@@ -1194,7 +1194,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
 
 	cp.own_addr_type = own_addr_type;
 	cp.channel_map = hdev->le_adv_channel_map;
-	cp.handle = instance;
+	cp.handle = adv ? adv->handle : instance;
 
 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
 		cp.primary_phy = HCI_ADV_PHY_1M;
@@ -4011,8 +4011,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
 			status = L2CAP_CS_AUTHOR_PEND;
 			chan->ops->defer(chan);
 		} else {
-			l2cap_state_change(chan, BT_CONNECT2);
-			result = L2CAP_CR_PEND;
+			l2cap_state_change(chan, BT_CONFIG);
+			result = L2CAP_CR_SUCCESS;
 			status = L2CAP_CS_NO_INFO;
 		}
 	} else {
@@ -4647,13 +4647,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
 	memset(&rsp, 0, sizeof(rsp));
 
-	if (max > hcon->le_conn_max_interval) {
-		BT_DBG("requested connection interval exceeds current bounds.");
-		err = -EINVAL;
-	} else {
-		err = hci_check_conn_params(min, max, latency, to_multiplier);
-	}
-
+	err = hci_check_conn_params(min, max, latency, to_multiplier);
 	if (err)
 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
 	else
@@ -73,11 +73,10 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
 }
 EXPORT_SYMBOL_GPL(br_mst_get_state);
 
-static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
+static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
+				  struct net_bridge_vlan *v,
 				  u8 state)
 {
-	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
-
 	if (br_vlan_get_state(v) == state)
 		return;
 
@@ -103,7 +102,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
 	int err = 0;
 
 	rcu_read_lock();
-	vg = nbp_vlan_group(p);
+	vg = nbp_vlan_group_rcu(p);
 	if (!vg)
 		goto out;
 
@@ -121,7 +120,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
 		if (v->brvlan->msti != msti)
 			continue;
 
-		br_mst_vlan_set_state(p, v, state);
+		br_mst_vlan_set_state(vg, v, state);
 	}
 
 out:
@@ -140,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
 		 * it.
 		 */
 		if (v != pv && v->brvlan->msti == msti) {
-			br_mst_vlan_set_state(pv->port, pv, v->state);
+			br_mst_vlan_set_state(vg, pv, v->state);
 			return;
 		}
 	}
 
 	/* Otherwise, start out in a new MSTI with all ports disabled. */
-	return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
+	return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED);
 }
 
 int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
@@ -485,8 +485,12 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const int timeout = TCP_RTO_MAX * 2;
-	u32 rcv_delta;
+	s32 rcv_delta;
 
+	/* Note: timer interrupt might have been delayed by at least one jiffy,
+	 * and tp->rcv_tstamp might very well have been written recently.
+	 * rcv_delta can thus be negative.
+	 */
 	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
 	if (rcv_delta <= timeout)
 		return false;
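Note: the s32 change avoids a classic unsigned-wraparound bug — the timer can fire just before rcv_tstamp is refreshed, making the true delta slightly negative. A minimal user-space demo:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t icsk_timeout = 1000, rcv_tstamp = 1002; /* jiffies */
	uint32_t u = icsk_timeout - rcv_tstamp;
	int32_t s = (int32_t)(icsk_timeout - rcv_tstamp);

	printf("u32 delta: %u\n", u); /* 4294967294: looks "timed out" */
	printf("s32 delta: %d\n", s); /* -2: clearly not timed out */
	return 0;
}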
@@ -36,6 +36,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
 		.flowi6_uid = sock_net_uid(net, sk),
 		.daddr = iph->daddr,
 		.saddr = iph->saddr,
+		.flowlabel = ip6_flowinfo(iph),
 	};
 	int err;
 
@@ -6343,12 +6343,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
 	if (!write)
 		return -EINVAL;
 
-	net = (struct net *)ctl->extra1;
-	delay = net->ipv6.sysctl.flush_delay;
 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 	if (ret)
 		return ret;
 
+	net = (struct net *)ctl->extra1;
+	delay = net->ipv6.sysctl.flush_delay;
 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
 	return 0;
 }
@@ -1439,7 +1439,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 		 */
 
 		newsk->sk_gso_type = SKB_GSO_TCPV6;
-		ip6_dst_store(newsk, dst, NULL, NULL);
 		inet6_sk_rx_dst_set(newsk, skb);
 
 		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
@@ -1450,6 +1449,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
+	ip6_dst_store(newsk, dst, NULL, NULL);
+
 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
 	newnp->saddr = ireq->ir_v6_loc_addr;
 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
@@ -677,6 +677,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
 	unsigned int add_addr_accept_max;
 	struct mptcp_addr_info remote;
 	unsigned int subflows_max;
+	bool sf_created = false;
 	int i, nr;
 
 	add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
@@ -704,15 +705,18 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
 	if (nr == 0)
 		return;
 
-	msk->pm.add_addr_accepted++;
-	if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
-	    msk->pm.subflows >= subflows_max)
-		WRITE_ONCE(msk->pm.accept_addr, false);
-
 	spin_unlock_bh(&msk->pm.lock);
 	for (i = 0; i < nr; i++)
-		__mptcp_subflow_connect(sk, &addrs[i], &remote);
+		if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
+			sf_created = true;
 	spin_lock_bh(&msk->pm.lock);
+
+	if (sf_created) {
+		msk->pm.add_addr_accepted++;
+		if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+		    msk->pm.subflows >= subflows_max)
+			WRITE_ONCE(msk->pm.accept_addr, false);
+	}
 }
 
 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
@@ -814,10 +818,13 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
 		spin_lock_bh(&msk->pm.lock);
 
 		removed = true;
-		__MPTCP_INC_STATS(sock_net(sk), rm_type);
+		if (rm_type == MPTCP_MIB_RMSUBFLOW)
+			__MPTCP_INC_STATS(sock_net(sk), rm_type);
 	}
 	if (rm_type == MPTCP_MIB_RMSUBFLOW)
 		__set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
+	else if (rm_type == MPTCP_MIB_RMADDR)
+		__MPTCP_INC_STATS(sock_net(sk), rm_type);
 	if (!removed)
 		continue;
 
@@ -3740,6 +3740,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 	WRITE_ONCE(msk->write_seq, subflow->idsn);
 	WRITE_ONCE(msk->snd_nxt, subflow->idsn);
+	WRITE_ONCE(msk->snd_una, subflow->idsn);
 	if (likely(!__mptcp_check_fallback(msk)))
 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
 
@@ -1172,23 +1172,50 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
 		    .len = IPSET_MAXNAMELEN - 1 },
 };
 
-static void
-ip_set_destroy_set(struct ip_set *set)
-{
-	pr_debug("set: %s\n", set->name);
-
-	/* Must call it without holding any lock */
-	set->variant->destroy(set);
-	module_put(set->type->me);
-	kfree(set);
-}
+/* In order to return quickly when destroying a single set, it is split
+ * into two stages:
+ * - Cancel garbage collector
+ * - Destroy the set itself via call_rcu()
+ */
 
 static void
 ip_set_destroy_set_rcu(struct rcu_head *head)
 {
 	struct ip_set *set = container_of(head, struct ip_set, rcu);
 
-	ip_set_destroy_set(set);
+	set->variant->destroy(set);
+	module_put(set->type->me);
+	kfree(set);
+}
+
+static void
+_destroy_all_sets(struct ip_set_net *inst)
+{
+	struct ip_set *set;
+	ip_set_id_t i;
+	bool need_wait = false;
+
+	/* First cancel gc's: set:list sets are flushed as well */
+	for (i = 0; i < inst->ip_set_max; i++) {
+		set = ip_set(inst, i);
+		if (set) {
+			set->variant->cancel_gc(set);
+			if (set->type->features & IPSET_TYPE_NAME)
+				need_wait = true;
+		}
+	}
+	/* Must wait for flush to be really finished */
+	if (need_wait)
+		rcu_barrier();
+	for (i = 0; i < inst->ip_set_max; i++) {
+		set = ip_set(inst, i);
+		if (set) {
+			ip_set(inst, i) = NULL;
+			set->variant->destroy(set);
+			module_put(set->type->me);
+			kfree(set);
+		}
+	}
 }
 
 static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
@@ -1202,11 +1229,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
 	if (unlikely(protocol_min_failed(attr)))
 		return -IPSET_ERR_PROTOCOL;
 
-
 	/* Commands are serialized and references are
 	 * protected by the ip_set_ref_lock.
 	 * External systems (i.e. xt_set) must call
-	 * ip_set_put|get_nfnl_* functions, that way we
+	 * ip_set_nfnl_get_* functions, that way we
 	 * can safely check references here.
 	 *
 	 * list:set timer can only decrement the reference
@@ -1214,8 +1240,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
 	 * without holding the lock.
 	 */
 	if (!attr[IPSET_ATTR_SETNAME]) {
-		/* Must wait for flush to be really finished in list:set */
-		rcu_barrier();
 		read_lock_bh(&ip_set_ref_lock);
 		for (i = 0; i < inst->ip_set_max; i++) {
 			s = ip_set(inst, i);
@@ -1226,15 +1250,7 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
 		}
 		inst->is_destroyed = true;
 		read_unlock_bh(&ip_set_ref_lock);
-		for (i = 0; i < inst->ip_set_max; i++) {
-			s = ip_set(inst, i);
-			if (s) {
-				ip_set(inst, i) = NULL;
-				/* Must cancel garbage collectors */
-				s->variant->cancel_gc(s);
-				ip_set_destroy_set(s);
-			}
-		}
+		_destroy_all_sets(inst);
 		/* Modified by ip_set_destroy() only, which is serialized */
 		inst->is_destroyed = false;
 	} else {
@@ -1255,12 +1271,12 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
 		features = s->type->features;
 		ip_set(inst, i) = NULL;
 		read_unlock_bh(&ip_set_ref_lock);
+		/* Must cancel garbage collectors */
+		s->variant->cancel_gc(s);
 		if (features & IPSET_TYPE_NAME) {
 			/* Must wait for flush to be really finished */
 			rcu_barrier();
 		}
-		/* Must cancel garbage collectors */
-		s->variant->cancel_gc(s);
 		call_rcu(&s->rcu, ip_set_destroy_set_rcu);
 	}
 	return 0;
@@ -2364,31 +2380,26 @@ ip_set_net_init(struct net *net)
 	return 0;
 }
 
+static void __net_exit
+ip_set_net_pre_exit(struct net *net)
+{
+	struct ip_set_net *inst = ip_set_pernet(net);
+
+	inst->is_deleted = true; /* flag for ip_set_nfnl_put */
+}
+
 static void __net_exit
 ip_set_net_exit(struct net *net)
 {
 	struct ip_set_net *inst = ip_set_pernet(net);
 
-	struct ip_set *set = NULL;
-	ip_set_id_t i;
-
-	inst->is_deleted = true; /* flag for ip_set_nfnl_put */
-
-	nfnl_lock(NFNL_SUBSYS_IPSET);
-	for (i = 0; i < inst->ip_set_max; i++) {
-		set = ip_set(inst, i);
-		if (set) {
-			ip_set(inst, i) = NULL;
-			set->variant->cancel_gc(set);
-			ip_set_destroy_set(set);
-		}
-	}
-	nfnl_unlock(NFNL_SUBSYS_IPSET);
+	_destroy_all_sets(inst);
 	kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
 }
 
 static struct pernet_operations ip_set_net_ops = {
 	.init	= ip_set_net_init,
+	.pre_exit = ip_set_net_pre_exit,
 	.exit   = ip_set_net_exit,
 	.id	= &ip_set_net_id,
 	.size	= sizeof(struct ip_set_net),
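Note: the restructuring above is the standard two-stage RCU teardown — unpublish and cancel garbage collectors first, then free via call_rcu() once readers are guaranteed gone, with rcu_barrier() where queued callbacks must finish before proceeding. A generic, hedged sketch (struct and helpers invented for illustration, not the ipset code itself):

struct foo {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void foo_destroy(struct foo __rcu **slot)
{
	struct foo *f = rcu_dereference_protected(*slot, 1);

	RCU_INIT_POINTER(*slot, NULL);	 /* 1: unpublish */
	call_rcu(&f->rcu, foo_free_rcu); /* 2: free after a grace period */
}

/* rcu_barrier() waits for every pending call_rcu() callback, which is
 * why the namespace-exit path above runs it before the final frees. */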
@@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
 	struct set_elem *e;
 	int ret;
 
-	list_for_each_entry(e, &map->members, list) {
+	list_for_each_entry_rcu(e, &map->members, list) {
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
 	struct set_elem *e;
 	int ret;
 
-	list_for_each_entry(e, &map->members, list) {
+	list_for_each_entry_rcu(e, &map->members, list) {
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 	struct list_set *map = set->data;
 	struct set_adt_elem *d = value;
 	struct set_elem *e, *next, *prev = NULL;
-	int ret;
+	int ret = 0;
 
-	list_for_each_entry(e, &map->members, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(e, &map->members, list) {
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 
 		if (d->before == 0) {
 			ret = 1;
+			goto out;
 		} else if (d->before > 0) {
 			next = list_next_entry(e, list);
 			ret = !list_is_last(&e->list, &map->members) &&
@@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 		} else {
 			ret = prev && prev->id == d->refid;
 		}
-		return ret;
+		goto out;
 	}
-	return 0;
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 static void
@@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 
 	/* Find where to add the new entry */
 	n = prev = next = NULL;
-	list_for_each_entry(e, &map->members, list) {
+	list_for_each_entry_rcu(e, &map->members, list) {
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 {
 	struct list_set *map = set->data;
 	struct set_adt_elem *d = value;
-	struct set_elem *e, *next, *prev = NULL;
+	struct set_elem *e, *n, *next, *prev = NULL;
 
-	list_for_each_entry(e, &map->members, list) {
+	list_for_each_entry_safe(e, n, &map->members, list) {
 		if (SET_WITH_TIMEOUT(set) &&
 		    ip_set_timeout_expired(ext_timeout(e, set)))
 			continue;
@@ -424,14 +428,8 @@ static void
 list_set_destroy(struct ip_set *set)
 {
 	struct list_set *map = set->data;
-	struct set_elem *e, *n;
 
-	list_for_each_entry_safe(e, n, &map->members, list) {
-		list_del(&e->list);
-		ip_set_put_byindex(map->net, e->id);
-		ip_set_ext_destroy(set, e);
-		kfree(e);
-	}
+	WARN_ON_ONCE(!list_empty(&map->members));
 	kfree(map);
 
 	set->data = NULL;
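Note: the list:set conversion applies the matching traversal rules — lockless lookups use list_for_each_entry_rcu() under rcu_read_lock(), and a walker that unlinks entries uses the _safe variant so it never steps through a just-removed node. Hedged sketch with an invented element type:

struct elem {
	struct list_head list;
	struct rcu_head rcu;
};

/* Reader: may run concurrently with add/del. */
rcu_read_lock();
list_for_each_entry_rcu(e, &head, list) {
	/* e stays valid until rcu_read_unlock() */
}
rcu_read_unlock();

/* Writer under the set lock, deleting as it walks. */
list_for_each_entry_safe(e, n, &head, list) {
	list_del_rcu(&e->list);
	kfree_rcu(e, rcu);
}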
@@ -839,6 +839,9 @@ static int nft_meta_inner_init(const struct nft_ctx *ctx,
 	struct nft_meta *priv = nft_expr_priv(expr);
 	unsigned int len;
 
+	if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG])
+		return -EINVAL;
+
 	priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
 	switch (priv->key) {
 	case NFT_META_PROTOCOL:
@@ -650,6 +650,10 @@ static int nft_payload_inner_init(const struct nft_ctx *ctx,
 	struct nft_payload *priv = nft_expr_priv(expr);
 	u32 base;
 
+	if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] ||
+	    !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG])
+		return -EINVAL;
+
 	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
 	switch (base) {
 	case NFT_PAYLOAD_TUN_HEADER:
@@ -676,6 +676,7 @@ struct Qdisc noop_qdisc = {
 		.qlen = 0,
 		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
 	},
+	.owner = -1,
};
 EXPORT_SYMBOL(noop_qdisc);
 
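Note: the one-line initializer matters because the 6.10 transmit path gained an owner-based recursion guard; a static qdisc left .owner zero-initialized therefore "matched" CPU 0. Assumed shape of the guard, simplified from __dev_xmit_skb():

if (unlikely(READ_ONCE(q->owner) == smp_processor_id()))
	return NET_XMIT_DROP; /* with owner == 0, every CPU-0 transmit
			       * through noop_qdisc looked recursive */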
@@ -2625,18 +2625,18 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 	if (skb == u->oob_skb) {
 		if (copied) {
 			skb = NULL;
-		} else if (sock_flag(sk, SOCK_URGINLINE)) {
-			if (!(flags & MSG_PEEK)) {
+		} else if (!(flags & MSG_PEEK)) {
+			if (sock_flag(sk, SOCK_URGINLINE)) {
 				WRITE_ONCE(u->oob_skb, NULL);
 				consume_skb(skb);
+			} else {
+				__skb_unlink(skb, &sk->sk_receive_queue);
+				WRITE_ONCE(u->oob_skb, NULL);
+				unlinked_skb = skb;
+				skb = skb_peek(&sk->sk_receive_queue);
 			}
-		} else if (flags & MSG_PEEK) {
-			skb = NULL;
-		} else {
-			__skb_unlink(skb, &sk->sk_receive_queue);
-			WRITE_ONCE(u->oob_skb, NULL);
-			unlinked_skb = skb;
-			skb = skb_peek(&sk->sk_receive_queue);
+		} else if (!sock_flag(sk, SOCK_URGINLINE)) {
+			skb = skb_peek_next(skb, &sk->sk_receive_queue);
 		}
 	}
 
@@ -2249,9 +2249,10 @@ remove_tests()
 	if reset "remove invalid addresses"; then
 		pm_nl_set_limits $ns1 3 3
 		pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+		# broadcast IP: no packet for this address will be received on ns1
+		pm_nl_add_endpoint $ns1 224.0.0.1 flags signal
 		pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
-		pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
-		pm_nl_set_limits $ns2 3 3
+		pm_nl_set_limits $ns2 2 2
 		addr_nr_ns1=-3 speed=10 \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 1 1 1