Merge tag 'net-6.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from can and netfilter.

  Current release - regressions:

   - rtnetlink: try the outer netns attribute in rtnl_get_peer_net()

   - rust: net::phy fix module autoloading

  Current release - new code bugs:

   - phy: avoid undefined behavior in *_led_polarity_set()

   - eth: octeontx2-pf: fix netdev memory leak in rvu_rep_create()

  Previous releases - regressions:

   - smc: check sndbuf_space again after NOSPACE flag is set in smc_poll

   - ipvs: fix clamp() of ip_vs_conn_tab on small memory systems

   - dsa: restore dsa_software_vlan_untag() ability to operate on
     VLAN-untagged traffic

   - eth:
       - tun: fix tun_napi_alloc_frags()
       - ionic: no double destroy workqueue
       - idpf: trigger SW interrupt when exiting wb_on_itr mode
       - rswitch: rework ts tags management
       - team: fix feature exposure when no ports are present

  Previous releases - always broken:

   - core: fix repeated netlink messages in queue dump

   - mdiobus: fix an OF node reference leak

   - smc: check iparea_offset and ipv6_prefixes_cnt when receiving
     proposal msg

   - can: fix missed interrupts with m_can_pci

   - eth: oa_tc6: fix infinite loop error when tx credits becomes 0"

* tag 'net-6.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (45 commits)
  net: mctp: handle skb cleanup on sock_queue failures
  net: mdiobus: fix an OF node reference leak
  octeontx2-pf: fix error handling of devlink port in rvu_rep_create()
  octeontx2-pf: fix netdev memory leak in rvu_rep_create()
  psample: adjust size if rate_as_probability is set
  netdev-genl: avoid empty messages in queue dump
  net: dsa: restore dsa_software_vlan_untag() ability to operate on VLAN-untagged traffic
  selftests: openvswitch: fix tcpdump execution
  net: usb: qmi_wwan: add Quectel RG255C
  net: phy: avoid undefined behavior in *_led_polarity_set()
  netfilter: ipset: Fix for recursive locking warning
  ipvs: Fix clamp() of ip_vs_conn_tab on small memory systems
  can: m_can: fix missed interrupts with m_can_pci
  can: m_can: set init flag earlier in probe
  rtnetlink: Try the outer netns attribute in rtnl_get_peer_net().
  net: netdevsim: fix nsim_pp_hold_write()
  idpf: trigger SW interrupt when exiting wb_on_itr mode
  idpf: add support for SW triggered interrupts
  qed: fix possible uninit pointer read in qed_mcp_nvm_info_populate()
  net: ethernet: bgmac-platform: fix an OF node reference leak
  ...
Commit 8faabc041a by Linus Torvalds, 2024-12-19 09:19:11 -08:00
46 changed files with 408 additions and 154 deletions

@@ -1220,20 +1220,32 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
static int m_can_interrupt_handler(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
u32 ir;
u32 ir = 0, ir_read;
int ret;
if (pm_runtime_suspended(cdev->dev))
return IRQ_NONE;
ir = m_can_read(cdev, M_CAN_IR);
/* The m_can controller signals its interrupt status as a level, but
* depending in the integration the CPU may interpret the signal as
* edge-triggered (for example with m_can_pci). For these
* edge-triggered integrations, we must observe that IR is 0 at least
* once to be sure that the next interrupt will generate an edge.
*/
while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
ir |= ir_read;
/* ACK all irqs */
m_can_write(cdev, M_CAN_IR, ir);
if (!cdev->irq_edge_triggered)
break;
}
m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
/* ACK all irqs */
m_can_write(cdev, M_CAN_IR, ir);
if (cdev->ops->clear_interrupts)
cdev->ops->clear_interrupts(cdev);
@@ -1695,6 +1707,14 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
return -EINVAL;
}
/* Write the INIT bit, in case no hardware reset has happened before
* the probe (for example, it was observed that the Intel Elkhart Lake
* SoCs do not properly reset the CAN controllers on reboot)
*/
err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
if (err)
return err;
if (!cdev->is_peripheral)
netif_napi_add(dev, &cdev->napi, m_can_poll);
@@ -1746,11 +1766,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
return -EINVAL;
}
/* Forcing standby mode should be redundant, as the chip should be in
* standby after a reset. Write the INIT bit anyways, should the chip
* be configured by previous stage.
*/
return m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
return 0;
}
static void m_can_stop(struct net_device *dev)
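
The two m_can hunks above share one idea. As a rough standalone sketch (illustrative C, not the driver code; read_status()/ack_status() and the dev fields are hypothetical stand-ins): a controller that signals interrupt status as a level must be drained to zero when the host latches the line as an edge, otherwise a cause that arrives while the handler runs never produces a new edge and the interrupt is lost.

	/* Hedged sketch of the drain loop, under the assumptions above. */
	u32 status = 0, pending;

	while ((pending = read_status(dev)) != 0) {
		status |= pending;		/* remember every cause seen */
		ack_status(dev, pending);	/* clear them at the source */
		if (!dev->irq_edge_triggered)	/* level-triggered hosts may stop */
			break;
	}
	if (!status)
		return IRQ_NONE;
	/* ... handle the accumulated causes in "status" ... */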

@@ -99,6 +99,7 @@ struct m_can_classdev {
int pm_clock_support;
int pm_wake_source;
int is_peripheral;
bool irq_edge_triggered;
// Cached M_CAN_IE register content
u32 active_interrupts;

@@ -127,6 +127,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->pm_clock_support = 1;
mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
mcan_class->irq_edge_triggered = true;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);

@@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac)
static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *phy_node;
struct bgmac *bgmac;
struct resource *regs;
int ret;
@@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
if (of_parse_phandle(np, "phy-handle", 0)) {
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (phy_node) {
of_node_put(phy_node);
bgmac->phy_connect = platform_phy_connect;
} else {
bgmac->phy_connect = bgmac_phy_connect_direct;
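
The leak fixed here is a generic of_parse_phandle() pitfall: the call returns a device node with its reference count raised, so even a pure presence test must drop that reference. A minimal sketch of the corrected pattern (standard OF API, illustrative):

	struct device_node *node;

	node = of_parse_phandle(np, "phy-handle", 0);	/* takes a reference */
	if (node) {
		of_node_put(node);	/* presence confirmed; reference not kept */
		/* ... choose the phandle-based PHY setup path ... */
	}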

@@ -346,8 +346,9 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* driver. Once driver synthesizes cpl_pass_accept_req the skb will go
* through the regular cpl_pass_accept_req processing in TOM.
*/
skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
- pktshift, GFP_ATOMIC);
skb = alloc_skb(size_add(gl->tot_len,
sizeof(struct cpl_pass_accept_req)) -
pktshift, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)

@@ -172,6 +172,7 @@ static int create_txqs(struct hinic_dev *nic_dev)
hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
nic_dev->txqs = NULL;
return err;
}
@@ -268,6 +269,7 @@ static int create_rxqs(struct hinic_dev *nic_dev)
hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
nic_dev->rxqs = NULL;
return err;
}

@@ -101,6 +101,9 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
intr->dyn_ctl_swint_trig_m = PF_GLINT_DYN_CTL_SWINT_TRIG_M;
intr->dyn_ctl_sw_itridx_ena_m =
PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_PF_ITR_IDX_SPACING);

@@ -3604,21 +3604,31 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
/**
* idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
* @q_vector: pointer to q_vector
* @type: itr index
* @itr: itr value
*/
static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
const int type, u16 itr)
static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
{
u32 itr_val;
u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
int type = IDPF_NO_ITR_UPDATE_IDX;
u16 itr = 0;
if (q_vector->wb_on_itr) {
/*
* Trigger a software interrupt when exiting wb_on_itr, to make
* sure we catch any pending write backs that might have been
* missed due to interrupt state transition.
*/
itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
type = IDPF_SW_ITR_UPDATE_IDX;
itr = IDPF_ITR_20K;
}
itr &= IDPF_ITR_MASK;
/* Don't clear PBA because that can cause lost interrupts that
* came in while we were cleaning/polling
*/
itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
(type << q_vector->intr_reg.dyn_ctl_itridx_s) |
(itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
(itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
return itr_val;
}
@@ -3716,9 +3726,8 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/* net_dim() updates ITR out-of-band using a work item */
idpf_net_dim(q_vector);
intval = idpf_vport_intr_buildreg_itr(q_vector);
q_vector->wb_on_itr = false;
intval = idpf_vport_intr_buildreg_itr(q_vector,
IDPF_NO_ITR_UPDATE_IDX, 0);
writel(intval, q_vector->intr_reg.dyn_ctl);
}

@@ -354,6 +354,8 @@ struct idpf_vec_regs {
* @dyn_ctl_itridx_m: Mask for ITR index
* @dyn_ctl_intrvl_s: Register bit offset for ITR interval
* @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
* @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
* @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
* @rx_itr: RX ITR register
* @tx_itr: TX ITR register
* @icr_ena: Interrupt cause register offset
@@ -367,6 +369,8 @@ struct idpf_intr_reg {
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
u32 dyn_ctl_wb_on_itr_m;
u32 dyn_ctl_sw_itridx_ena_m;
u32 dyn_ctl_swint_trig_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
@@ -437,7 +441,7 @@ struct idpf_q_vector {
cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 112,
libeth_cacheline_set_assert(struct idpf_q_vector, 120,
24 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
8 + sizeof(cpumask_var_t));
@@ -471,6 +475,8 @@ struct idpf_tx_queue_stats {
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
#define IDPF_ITR_RX_DEF IDPF_ITR_20K
/* Index used for 'SW ITR' update in DYN_CTL register */
#define IDPF_SW_ITR_UPDATE_IDX 2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX 3
#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)

@@ -101,6 +101,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S;
intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
intr->dyn_ctl_swint_trig_m = VF_INT_DYN_CTLN_SWINT_TRIG_M;
intr->dyn_ctl_sw_itridx_ena_m =
VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_VF_ITR_IDX_SPACING);

@@ -680,14 +680,17 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
ndev->features |= ndev->hw_features;
eth_hw_addr_random(ndev);
err = rvu_rep_devlink_port_register(rep);
if (err)
if (err) {
free_netdev(ndev);
goto exit;
}
SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
err = register_netdev(ndev);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"PFVF representor registration failed");
rvu_rep_devlink_port_unregister(rep);
free_netdev(ndev);
goto exit;
}

@@ -1432,7 +1432,7 @@ void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
memset(ifh, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(ifh, 1);
ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
ocelot_ifh_set_src(ifh, ocelot->num_phys_ports);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_qos_class(ifh, qos_class);
ocelot_ifh_set_tag_type(ifh, tag_type);

@@ -113,6 +113,7 @@ struct oa_tc6 {
struct mii_bus *mdiobus;
struct spi_device *spi;
struct mutex spi_ctrl_lock; /* Protects spi control transfer */
spinlock_t tx_skb_lock; /* Protects tx skb handling */
void *spi_ctrl_tx_buf;
void *spi_ctrl_rx_buf;
void *spi_data_tx_buf;
@@ -1004,8 +1005,10 @@ static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
used_tx_credits++) {
if (!tc6->ongoing_tx_skb) {
spin_lock_bh(&tc6->tx_skb_lock);
tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
tc6->waiting_tx_skb = NULL;
spin_unlock_bh(&tc6->tx_skb_lock);
}
if (!tc6->ongoing_tx_skb)
break;
@@ -1111,8 +1114,9 @@ static int oa_tc6_spi_thread_handler(void *data)
/* This kthread will be waken up if there is a tx skb or mac-phy
* interrupt to perform spi transfer with tx chunks.
*/
wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
tc6->int_flag ||
wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
(tc6->waiting_tx_skb &&
tc6->tx_credits) ||
kthread_should_stop());
if (kthread_should_stop())
@@ -1209,7 +1213,9 @@ netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
return NETDEV_TX_OK;
}
spin_lock_bh(&tc6->tx_skb_lock);
tc6->waiting_tx_skb = skb;
spin_unlock_bh(&tc6->tx_skb_lock);
/* Wake spi kthread to perform spi transfer */
wake_up_interruptible(&tc6->spi_wq);
@@ -1239,6 +1245,7 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
tc6->netdev = netdev;
SET_NETDEV_DEV(netdev, &spi->dev);
mutex_init(&tc6->spi_ctrl_lock);
spin_lock_init(&tc6->tx_skb_lock);
/* Set the SPI controller to pump at realtime priority */
tc6->spi->rt = true;
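
Two things change in oa_tc6 above: the waiting_tx_skb handoff between the transmit path and the SPI thread is now serialized by tx_skb_lock, and the thread's wait condition includes tx_credits so it no longer busy-loops when credits hit zero. The handoff, distilled (an illustrative sketch of the pattern, not the full driver):

	/* producer (ndo_start_xmit): publish exactly one pending skb */
	spin_lock_bh(&tc6->tx_skb_lock);
	tc6->waiting_tx_skb = skb;
	spin_unlock_bh(&tc6->tx_skb_lock);
	wake_up_interruptible(&tc6->spi_wq);

	/* consumer (SPI thread): claim it atomically w.r.t. the producer */
	spin_lock_bh(&tc6->tx_skb_lock);
	tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
	tc6->waiting_tx_skb = NULL;
	spin_unlock_bh(&tc6->tx_skb_lock);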

@@ -277,7 +277,10 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
destroy_workqueue(ionic->wq);
if (ionic->wq) {
destroy_workqueue(ionic->wq);
ionic->wq = NULL;
}
mutex_destroy(&idev->cmb_inuse_lock);
}

@@ -961,8 +961,8 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
len = min_t(u32, sizeof(xcvr->sprom), ee->len);
do {
memcpy(data, xcvr->sprom, len);
memcpy(tbuf, xcvr->sprom, len);
memcpy(data, &xcvr->sprom[ee->offset], len);
memcpy(tbuf, &xcvr->sprom[ee->offset], len);
/* Let's make sure we got a consistent copy */
if (!memcmp(data, tbuf, len))

@@ -3869,8 +3869,8 @@ int ionic_lif_register(struct ionic_lif *lif)
/* only register LIF0 for now */
err = register_netdev(lif->netdev);
if (err) {
dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
ionic_lif_unregister_phc(lif);
dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err);
ionic_lif_unregister(lif);
return err;
}

@@ -3358,6 +3358,7 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
p_ptt, &nvm_info.num_images);
if (rc == -EOPNOTSUPP) {
DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
nvm_info.num_images = 0;
goto out;
} else if (rc || !nvm_info.num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");

@@ -547,7 +547,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
desc = &gq->ts_ring[gq->ring_size];
desc->desc.die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
INIT_LIST_HEAD(&priv->gwca.ts_info_list);
return 0;
}
@@ -1003,9 +1002,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
static void rswitch_ts(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
struct skb_shared_hwtstamps shhwtstamps;
struct rswitch_ts_desc *desc;
struct rswitch_device *rdev;
struct sk_buff *ts_skb;
struct timespec64 ts;
unsigned int num;
u32 tag, port;
@@ -1015,23 +1015,28 @@ static void rswitch_ts(struct rswitch_private *priv)
dma_rmb();
port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
if (unlikely(port >= RSWITCH_NUM_PORTS))
goto next;
rdev = priv->rdev[port];
tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
if (unlikely(tag >= TS_TAGS_PER_PORT))
goto next;
ts_skb = xchg(&rdev->ts_skb[tag], NULL);
smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
clear_bit(tag, rdev->ts_skb_used);
list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
if (!(ts_info->port == port && ts_info->tag == tag))
continue;
if (unlikely(!ts_skb))
goto next;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
ts.tv_sec = __le32_to_cpu(desc->ts_sec);
ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
skb_tstamp_tx(ts_info->skb, &shhwtstamps);
dev_consume_skb_irq(ts_info->skb);
list_del(&ts_info->list);
kfree(ts_info);
break;
}
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
ts.tv_sec = __le32_to_cpu(desc->ts_sec);
ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
skb_tstamp_tx(ts_skb, &shhwtstamps);
dev_consume_skb_irq(ts_skb);
next:
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->ts_ring[gq->cur];
}
@@ -1576,8 +1581,9 @@ static int rswitch_open(struct net_device *ndev)
static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
struct sk_buff *ts_skb;
unsigned long flags;
unsigned int tag;
netif_tx_stop_all_queues(ndev);
@@ -1594,12 +1600,13 @@ static int rswitch_stop(struct net_device *ndev)
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
if (ts_info->port != rdev->port)
continue;
dev_kfree_skb_irq(ts_info->skb);
list_del(&ts_info->list);
kfree(ts_info);
for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
tag < TS_TAGS_PER_PORT;
tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
ts_skb = xchg(&rdev->ts_skb[tag], NULL);
clear_bit(tag, rdev->ts_skb_used);
if (ts_skb)
dev_kfree_skb(ts_skb);
}
return 0;
@@ -1612,20 +1619,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
struct rswitch_gwca_ts_info *ts_info;
unsigned int tag;
ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
if (!ts_info)
tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
if (tag == TS_TAGS_PER_PORT)
return false;
smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
rdev->ts_skb[tag] = skb_get(skb);
set_bit(tag, rdev->ts_skb_used);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
rdev->ts_tag++;
desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
ts_info->skb = skb_get(skb);
ts_info->port = rdev->port;
ts_info->tag = rdev->ts_tag;
list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
skb_tx_timestamp(skb);
}

@@ -972,14 +972,6 @@ struct rswitch_gwca_queue {
};
};
struct rswitch_gwca_ts_info {
struct sk_buff *skb;
struct list_head list;
int port;
u8 tag;
};
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
unsigned int index;
@@ -989,7 +981,6 @@ struct rswitch_gwca {
struct rswitch_gwca_queue *queues;
int num_queues;
struct rswitch_gwca_queue ts_queue;
struct list_head ts_info_list;
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
@@ -997,6 +988,7 @@ };
};
#define NUM_QUEUES_PER_NDEV 2
#define TS_TAGS_PER_PORT 256
struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
@@ -1004,7 +996,8 @@ struct rswitch_device {
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
u8 ts_tag;
struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
bool disabled;
int port;
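
The rswitch rework replaces the kzalloc'd ts_info list (and its list walks in the completion and stop paths) with a fixed per-port tag space: an skb pointer array indexed by tag plus a usage bitmap. The lifecycle, distilled from the hunks above (illustrative sketch):

	/* transmit: claim a free tag for the timestamp request */
	tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
	if (tag == TS_TAGS_PER_PORT)
		return false;			/* all 256 tags in flight */
	rdev->ts_skb[tag] = skb_get(skb);
	set_bit(tag, rdev->ts_skb_used);

	/* completion: look the skb up by tag and release the tag */
	ts_skb = xchg(&rdev->ts_skb[tag], NULL);
	clear_bit(tag, rdev->ts_skb_used);	/* tag may now be reused */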

@@ -40,6 +40,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode)
static struct mii_timestamper *
fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
{
struct mii_timestamper *mii_ts;
struct of_phandle_args arg;
int err;
@@ -53,10 +54,16 @@ fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
else if (err)
return ERR_PTR(err);
if (arg.args_count != 1)
return ERR_PTR(-EINVAL);
if (arg.args_count != 1) {
mii_ts = ERR_PTR(-EINVAL);
goto put_node;
}
return register_mii_timestamper(arg.np, arg.args[0]);
mii_ts = register_mii_timestamper(arg.np, arg.args[0]);
put_node:
of_node_put(arg.np);
return mii_ts;
}
int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,

@@ -149,6 +149,8 @@ static ssize_t nsim_dev_health_break_write(struct file *file,
char *break_msg;
int err;
if (count == 0 || count > PAGE_SIZE)
return -EINVAL;
break_msg = memdup_user_nul(data, count);
if (IS_ERR(break_msg))
return PTR_ERR(break_msg);

@@ -635,10 +635,10 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
page_pool_put_full_page(ns->page->pp, ns->page, false);
ns->page = NULL;
}
rtnl_unlock();
exit:
return count;
rtnl_unlock();
return ret;
}
static const struct file_operations nsim_pp_hold_fops = {

@@ -156,5 +156,5 @@ int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long
if (force_active_high || force_active_low)
return aqr_phy_led_active_low_set(phydev, index, force_active_low);
unreachable();
return -EINVAL;
}

@@ -529,7 +529,7 @@ static int xway_gphy_led_polarity_set(struct phy_device *phydev, int index,
if (force_active_high)
return phy_clear_bits(phydev, XWAY_MDIO_LED, XWAY_GPHY_LED_INV(index));
unreachable();
return -EINVAL;
}
static struct phy_driver xway_gphy[] = {

@@ -1014,7 +1014,7 @@ static int gpy_led_polarity_set(struct phy_device *phydev, int index,
if (force_active_high)
return phy_clear_bits(phydev, PHY_LED, PHY_LED_POLARITY(index));
unreachable();
return -EINVAL;
}
static struct phy_driver gpy_drivers[] = {

@@ -998,9 +998,13 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
vlan_features = netdev_base_features(vlan_features);
rcu_read_lock();
if (list_empty(&team->port_list))
goto done;
vlan_features = netdev_base_features(vlan_features);
enc_features = netdev_base_features(enc_features);
list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
@@ -1010,11 +1014,11 @@ static void __team_compute_features(struct team *team)
port->dev->hw_enc_features,
TEAM_ENC_FEATURES);
dst_release_flag &= port->dev->priv_flags;
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
done:
rcu_read_unlock();
team->dev->vlan_features = vlan_features;

@@ -1481,7 +1481,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
skb->truesize += skb->data_len;
for (i = 1; i < it->nr_segs; i++) {
const struct iovec *iov = iter_iov(it);
const struct iovec *iov = iter_iov(it) + i;
size_t fragsz = iov->iov_len;
struct page *page;
void *frag;
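
The tun one-liner is an iovec indexing fix: iter_iov() returns a pointer to the iterator's first segment, so without the "+ i" offset every pass of the fragment loop re-read segment 0. Sketched (illustrative, not the full function):

	for (i = 1; i < it->nr_segs; i++) {
		/* the i-th segment, not the first one every time */
		const struct iovec *iov = iter_iov(it) + i;
		size_t fragsz = iov->iov_len;
		/* ... allocate and fill a frag of fragsz bytes ... */
	}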

@@ -1429,6 +1429,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0316, 3)}, /* Quectel RG255C */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0112, 0)}, /* Fibocom FG132 */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */

@@ -430,10 +430,10 @@ static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
u32 q_type, const struct genl_info *info)
{
int err = 0;
int err;
if (!(netdev->flags & IFF_UP))
return err;
return -ENOENT;
err = netdev_nl_queue_validate(netdev, q_idx, q_type);
if (err)
@@ -488,24 +488,21 @@ netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
struct netdev_nl_dump_ctx *ctx)
{
int err = 0;
int i;
if (!(netdev->flags & IFF_UP))
return err;
for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
err = netdev_nl_queue_fill_one(rsp, netdev, i,
for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
NETDEV_QUEUE_TYPE_RX, info);
if (err)
return err;
ctx->rxq_idx = i++;
}
for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
err = netdev_nl_queue_fill_one(rsp, netdev, i,
for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
NETDEV_QUEUE_TYPE_TX, info);
if (err)
return err;
ctx->txq_idx = i++;
}
return err;
@@ -671,7 +668,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
i, info);
if (err)
return err;
ctx->rxq_idx = i++;
ctx->rxq_idx = ++i;
}
i = ctx->txq_idx;
while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
@@ -679,7 +676,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
i, info);
if (err)
return err;
ctx->txq_idx = i++;
ctx->txq_idx = ++i;
}
ctx->rxq_idx = 0;
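
Both netdev-genl hunks fix the same resume-cursor mistake. A netlink dump can stop when the message fills and later restarts from a saved index; saving it with a post-increment records the entry that was just emitted, so the restart repeats it. A minimal sketch (fill_one() is a hypothetical stand-in for the fill helpers):

	while (i < nqueues) {
		if (fill_one(rsp, i))	/* message full; dump resumes later */
			return -EMSGSIZE;
		ctx->idx = ++i;		/* was "ctx->idx = i++", which saved
					 * the index already emitted and
					 * duplicated it on resume */
	}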

@@ -3819,6 +3819,7 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
}
static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
struct nlattr *tbp[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
@@ -3826,7 +3827,7 @@ static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
int err;
if (!data || !data[ops->peer_type])
return NULL;
return rtnl_link_get_net_ifla(tbp);
err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack);
if (err < 0)
@@ -3971,7 +3972,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
}
if (ops->peer_type) {
peer_net = rtnl_get_peer_net(ops, data, extack);
peer_net = rtnl_get_peer_net(ops, tb, data, extack);
if (IS_ERR(peer_net)) {
ret = PTR_ERR(peer_net);
goto put_ops;

@@ -138,9 +138,10 @@ static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
* dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
* @skb: Pointer to socket buffer (packet)
*
* Receive path method for switches which cannot avoid tagging all packets
* towards the CPU port. Called when ds->untag_bridge_pvid (legacy) or
* ds->untag_vlan_aware_bridge_pvid is set to true.
* Receive path method for switches which send some packets as VLAN-tagged
* towards the CPU port (generally from VLAN-aware bridge ports) even when the
* packet was not tagged on the wire. Called when ds->untag_bridge_pvid
* (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
*
* As a side effect of this method, any VLAN tag from the skb head is moved
* to hwaccel.
@@ -149,14 +150,19 @@ static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_user_to_port(skb->dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
u16 vid;
u16 vid, proto;
int err;
/* software untagging for standalone ports not yet necessary */
if (!br)
return skb;
err = br_vlan_get_proto(br, &proto);
if (err)
return skb;
/* Move VLAN tag from data to hwaccel */
if (!skb_vlan_tag_present(skb)) {
if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
skb = skb_vlan_untag(skb);
if (!skb)
return NULL;

@@ -374,8 +374,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
msk = NULL;
rc = -EINVAL;
/* we may be receiving a locally-routed packet; drop source sk
* accounting
/* We may be receiving a locally-routed packet; drop source sk
* accounting.
*
* From here, we will either queue the skb - either to a frag_queue, or
* to a receiving socket. When that succeeds, we clear the skb pointer;
* a non-NULL skb on exit will be otherwise unowned, and hence
* kfree_skb()-ed.
*/
skb_orphan(skb);
@@ -434,7 +439,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* pending key.
*/
if (flags & MCTP_HDR_FLAG_EOM) {
sock_queue_rcv_skb(&msk->sk, skb);
rc = sock_queue_rcv_skb(&msk->sk, skb);
if (!rc)
skb = NULL;
if (key) {
/* we've hit a pending reassembly; not much we
* can do but drop it
@@ -443,7 +450,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
rc = 0;
goto out_unlock;
}
@@ -470,8 +476,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* this function.
*/
rc = mctp_key_add(key, msk);
if (!rc)
if (!rc) {
trace_mctp_key_acquire(key);
skb = NULL;
}
/* we don't need to release key->lock on exit, so
* clean up here and suppress the unlock via
@@ -489,6 +497,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
key = NULL;
} else {
rc = mctp_frag_queue(key, skb);
if (!rc)
skb = NULL;
}
}
@@ -503,12 +513,19 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
else
rc = mctp_frag_queue(key, skb);
if (rc)
goto out_unlock;
/* we've queued; the queue owns the skb now */
skb = NULL;
/* end of message? deliver to socket, and we're done with
* the reassembly/response key
*/
if (!rc && flags & MCTP_HDR_FLAG_EOM) {
sock_queue_rcv_skb(key->sk, key->reasm_head);
key->reasm_head = NULL;
if (flags & MCTP_HDR_FLAG_EOM) {
rc = sock_queue_rcv_skb(key->sk, key->reasm_head);
if (!rc)
key->reasm_head = NULL;
__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
@@ -527,8 +544,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
if (any_key)
mctp_key_unref(any_key);
out:
if (rc)
kfree_skb(skb);
kfree_skb(skb);
return rc;
}
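
The mctp_route_input() rework establishes one ownership rule: skb stays non-NULL only while the function still owns it, and every successful hand-off to a frag queue or socket queue clears the pointer. The single kfree_skb() at the exit label (a no-op on NULL) then covers every failure path without leaking on queueing failure or double-freeing on success. In sketch form (illustrative):

	rc = sock_queue_rcv_skb(&msk->sk, skb);
	if (!rc)
		skb = NULL;	/* the socket queue owns it now */
	/* ... */
out:
	kfree_skb(skb);		/* frees only if nobody took ownership */
	return rc;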

@@ -837,6 +837,90 @@ static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
mctp_test_route_input_multiple_nets_key_fini(test, &t2);
}
/* Input route to socket, using a single-packet message, where sock delivery
* fails. Ensure we're handling the failure appropriately.
*/
static void mctp_test_route_input_sk_fail_single(struct kunit *test)
{
const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO);
struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct socket *sock;
struct sk_buff *skb;
int rc;
__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
/* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will
* clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this.
*/
lock_sock(sock->sk);
WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
release_sock(sock->sk);
skb = mctp_test_create_skb(&hdr, 10);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
skb_get(skb);
mctp_test_skb_set_dev(skb, dev);
/* do route input, which should fail */
rc = mctp_route_input(&rt->rt, skb);
KUNIT_EXPECT_NE(test, rc, 0);
/* we should hold the only reference to skb */
KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1);
kfree_skb(skb);
__mctp_route_test_fini(test, dev, rt, sock);
}
/* Input route to socket, using a fragmented message, where sock delivery fails.
*/
static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
{
const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) };
struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct sk_buff *skbs[2];
struct socket *sock;
unsigned int i;
int rc;
__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
lock_sock(sock->sk);
WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
release_sock(sock->sk);
for (i = 0; i < ARRAY_SIZE(skbs); i++) {
skbs[i] = mctp_test_create_skb(&hdrs[i], 10);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skbs[i]);
skb_get(skbs[i]);
mctp_test_skb_set_dev(skbs[i], dev);
}
/* first route input should succeed, we're only queueing to the
* frag list
*/
rc = mctp_route_input(&rt->rt, skbs[0]);
KUNIT_EXPECT_EQ(test, rc, 0);
/* final route input should fail to deliver to the socket */
rc = mctp_route_input(&rt->rt, skbs[1]);
KUNIT_EXPECT_NE(test, rc, 0);
/* we should hold the only reference to both skbs */
KUNIT_EXPECT_EQ(test, refcount_read(&skbs[0]->users), 1);
kfree_skb(skbs[0]);
KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1);
kfree_skb(skbs[1]);
__mctp_route_test_fini(test, dev, rt, sock);
}
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
static void mctp_test_flow_init(struct kunit *test,
@@ -1053,6 +1137,8 @@ static struct kunit_case mctp_test_cases[] = {
mctp_route_input_sk_reasm_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
mctp_route_input_sk_keys_gen_params),
KUNIT_CASE(mctp_test_route_input_sk_fail_single),
KUNIT_CASE(mctp_test_route_input_sk_fail_frag),
KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
KUNIT_CASE(mctp_test_packet_flow),

@@ -611,6 +611,8 @@ init_list_set(struct net *net, struct ip_set *set, u32 size)
return true;
}
static struct lock_class_key list_set_lockdep_key;
static int
list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
@@ -627,6 +629,7 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (size < IP_SET_LIST_MIN_SIZE)
size = IP_SET_LIST_MIN_SIZE;
lockdep_set_class(&set->lock, &list_set_lockdep_key);
set->variant = &set_variant;
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
__alignof__(struct set_elem));

@@ -1495,8 +1495,8 @@ int __init ip_vs_conn_init(void)
max_avail -= 2; /* ~4 in hash row */
max_avail -= 1; /* IPVS up to 1/2 of mem */
max_avail -= order_base_2(sizeof(struct ip_vs_conn));
max = clamp(max, min, max_avail);
ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max);
max = clamp(max_avail, min, max);
ip_vs_conn_tab_bits = clamp(ip_vs_conn_tab_bits, min, max);
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
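
The ipvs change is about clamp()'s contract: clamp(val, lo, hi) requires lo <= hi. On small-memory systems the computed max_avail can drop below min, so the old clamp(max, min, max_avail) ran with an inverted range. A worked instance of the failure (illustrative numbers):

	/* with min = 8 and max_avail = 5, the old form is clamp(max, 8, 5),
	 * i.e. lo > hi, which clamp() forbids; the fix treats max_avail as
	 * the value and keeps [min, max] as the always-valid range:
	 */
	max = clamp(max_avail, min, max);
	ip_vs_conn_tab_bits = clamp(ip_vs_conn_tab_bits, min, max);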

@@ -393,7 +393,9 @@ void psample_sample_packet(struct psample_group *group,
nla_total_size_64bit(sizeof(u64)) + /* timestamp */
nla_total_size(sizeof(u16)) + /* protocol */
(md->user_cookie_len ?
nla_total_size(md->user_cookie_len) : 0); /* user cookie */
nla_total_size(md->user_cookie_len) : 0) + /* user cookie */
(md->rate_as_probability ?
nla_total_size(0) : 0); /* rate as probability */
#ifdef CONFIG_INET
tun_info = skb_tunnel_info(skb);
@@ -498,8 +500,9 @@ void psample_sample_packet(struct psample_group *group,
md->user_cookie))
goto error;
if (md->rate_as_probability)
nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY);
if (md->rate_as_probability &&
nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY))
goto error;
genlmsg_end(nl_skb, data);
genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,

@@ -2032,6 +2032,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc,
if (pclc->hdr.typev1 == SMC_TYPE_N)
return 0;
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
if (!pclc_prfx)
return -EPROTO;
if (smc_clc_prfx_match(newclcsock, pclc_prfx))
return SMC_CLC_DECL_DIFFPREFIX;
@@ -2145,6 +2147,8 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
pclc_smcd = smc_get_clc_msg_smcd(pclc);
smc_v2_ext = smc_get_clc_v2_ext(pclc);
smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext)
goto not_found;
mutex_lock(&smcd_dev_list.mutex);
if (pclc_smcd->ism.chid) {
@@ -2221,7 +2225,9 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
int rc = 0;
/* check if ISM V1 is available */
if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
if (!(ini->smcd_version & SMC_V1) ||
!smcd_indicated(ini->smc_type_v1) ||
!pclc_smcd)
goto not_found;
ini->is_smcd = true; /* prepare ISM check */
ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
@@ -2272,7 +2278,8 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
goto not_found;
smc_v2_ext = smc_get_clc_v2_ext(pclc);
if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
if (!smc_v2_ext ||
!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
goto not_found;
/* prepare RDMA check */
@@ -2881,6 +2888,13 @@ __poll_t smc_poll(struct file *file, struct socket *sock,
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
if (sk->sk_state != SMC_INIT) {
/* Race breaker the same way as tcp_poll(). */
smp_mb__after_atomic();
if (atomic_read(&smc->conn.sndbuf_space))
mask |= EPOLLOUT | EPOLLWRNORM;
}
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= EPOLLIN | EPOLLRDNORM;
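
The smc_poll() addition is the classic poll-side race breaker, with the same ordering tcp_poll() uses: advertise interest first, then re-check the condition with a barrier in between, so a space-freeing wakeup that ran between the first check and the flag set is not lost. The essential ordering (illustrative sketch):

	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);	/* 1: show interest */
	smp_mb__after_atomic();				/* 2: order vs. writer */
	if (atomic_read(&smc->conn.sndbuf_space))	/* 3: re-check */
		mask |= EPOLLOUT | EPOLLWRNORM;		/* space freed meanwhile */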

@@ -352,8 +352,11 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
struct smc_clc_msg_hdr *hdr = &pclc->hdr;
struct smc_clc_v2_extension *v2_ext;
v2_ext = smc_get_clc_v2_ext(pclc);
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
if (!pclc_prfx ||
pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX)
return false;
if (hdr->version == SMC_V1) {
if (hdr->typev1 == SMC_TYPE_N)
return false;
@@ -365,6 +368,13 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
sizeof(struct smc_clc_msg_trail))
return false;
} else {
v2_ext = smc_get_clc_v2_ext(pclc);
if ((hdr->typev2 != SMC_TYPE_N &&
(!v2_ext || v2_ext->hdr.eid_cnt > SMC_CLC_MAX_UEID)) ||
(smcd_indicated(hdr->typev2) &&
v2_ext->hdr.ism_gid_cnt > SMCD_CLC_MAX_V2_GID_ENTRIES))
return false;
if (ntohs(hdr->length) !=
sizeof(*pclc) +
sizeof(struct smc_clc_msg_smcd) +
@@ -764,6 +774,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
SMC_CLC_RECV_BUF_LEN : datlen;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (len < recvlen) {
smc->sk.sk_err = EPROTO;
reason_code = -EPROTO;
goto out;
}
datlen -= len;
}
if (clcm->type == SMC_CLC_DECLINE) {

@@ -336,8 +336,12 @@ struct smc_clc_msg_decline_v2 { /* clc decline message */
static inline struct smc_clc_msg_proposal_prefix *
smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
{
u16 offset = ntohs(pclc->iparea_offset);
if (offset > sizeof(struct smc_clc_msg_smcd))
return NULL;
return (struct smc_clc_msg_proposal_prefix *)
((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
((u8 *)pclc + sizeof(*pclc) + offset);
}
static inline bool smcr_indicated(int smc_type)
@@ -376,8 +380,14 @@ static inline struct smc_clc_v2_extension *
smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
{
struct smc_clc_msg_smcd *prop_smcd = smc_get_clc_msg_smcd(prop);
u16 max_offset;
if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset))
max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
offsetof(struct smc_clc_msg_proposal_area, pclc_smcd) -
offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset) ||
ntohs(prop_smcd->v2_ext_offset) > max_offset)
return NULL;
return (struct smc_clc_v2_extension *)
@@ -390,9 +400,15 @@ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
static inline struct smc_clc_smcd_v2_extension *
smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext)
{
u16 max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_smcd_v2_ext) -
offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
offsetof(struct smc_clc_v2_extension, hdr) -
offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset);
if (!prop_v2ext)
return NULL;
if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset))
if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) ||
ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) > max_offset)
return NULL;
return (struct smc_clc_smcd_v2_extension *)
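
All three smc_clc.h helpers now apply the same rule before doing pointer arithmetic with a peer-supplied offset: bound it by the worst-case layout of the proposal area, computed with offsetof()/offsetofend(). The shape of the check (illustrative sketch; wire_hdr, base, and MAX_LAYOUT_OFFSET are placeholders, not the SMC structs):

	u16 offset = ntohs(wire_hdr->ext_offset);	/* untrusted input */

	if (!offset || offset > MAX_LAYOUT_OFFSET)	/* bound from the layout */
		return NULL;				/* reject, don't dereference */
	return (void *)((u8 *)base + offset);		/* now provably in-buffer */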

@@ -1818,7 +1818,9 @@ void smcr_link_down_cond_sched(struct smc_link *lnk)
{
if (smc_link_downing(&lnk->state)) {
trace_smcr_link_down(lnk, __builtin_return_address(0));
schedule_work(&lnk->link_down_wrk);
smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */
if (!schedule_work(&lnk->link_down_wrk))
smcr_link_put(lnk);
}
}
@@ -1850,11 +1852,14 @@ static void smc_link_down_work(struct work_struct *work)
struct smc_link_group *lgr = link->lgr;
if (list_empty(&lgr->list))
return;
goto out;
wake_up_all(&lgr->llc_msg_waiter);
down_write(&lgr->llc_conf_mutex);
smcr_link_down(link);
up_write(&lgr->llc_conf_mutex);
out:
smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */
}
static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
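
The smc_core change pins the link across the deferred work: take a reference before schedule_work(), drop it immediately if the work was already queued (schedule_work() returns false), and let the work function put the reference on every exit path. The pattern (illustrative sketch):

	smcr_link_hold(lnk);				/* for the work function */
	if (!schedule_work(&lnk->link_down_wrk))
		smcr_link_put(lnk);			/* already queued: that
							 * instance holds its own ref */

	/* in smc_link_down_work(): */
	out:
		smcr_link_put(link);			/* pairs with the hold above */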

@@ -860,7 +860,7 @@ const fn as_int(&self) -> u32 {
/// ];
/// #[cfg(MODULE)]
/// #[no_mangle]
/// static __mod_mdio__phydev_device_table: [::kernel::bindings::mdio_device_id; 2] = _DEVICE_TABLE;
/// static __mod_device_table__mdio__phydev: [::kernel::bindings::mdio_device_id; 2] = _DEVICE_TABLE;
/// ```
#[macro_export]
macro_rules! module_phy_driver {
@@ -883,7 +887,7 @@ macro_rules! module_phy_driver {
#[cfg(MODULE)]
#[no_mangle]
static __mod_mdio__phydev_device_table: [$crate::bindings::mdio_device_id;
static __mod_device_table__mdio__phydev: [$crate::bindings::mdio_device_id;
$crate::module_phy_driver!(@count_devices $($dev),+) + 1] = _DEVICE_TABLE;
};

@@ -556,10 +556,10 @@ class YnlFamily(SpecFamily):
if attr["type"] == 'nest':
nl_type |= Netlink.NLA_F_NESTED
attr_payload = b''
sub_attrs = SpaceAttrs(self.attr_sets[space], value, search_attrs)
sub_space = attr['nested-attributes']
sub_attrs = SpaceAttrs(self.attr_sets[sub_space], value, search_attrs)
for subname, subvalue in value.items():
attr_payload += self._add_attr(attr['nested-attributes'],
subname, subvalue, sub_attrs)
attr_payload += self._add_attr(sub_space, subname, subvalue, sub_attrs)
elif attr["type"] == 'flag':
if not value:
# If value is absent or false then skip attribute creation.

@@ -8,25 +8,28 @@ from lib.py import cmd
import glob
def sys_get_queues(ifname) -> int:
folders = glob.glob(f'/sys/class/net/{ifname}/queues/rx-*')
def sys_get_queues(ifname, qtype='rx') -> int:
folders = glob.glob(f'/sys/class/net/{ifname}/queues/{qtype}-*')
return len(folders)
def nl_get_queues(cfg, nl):
def nl_get_queues(cfg, nl, qtype='rx'):
queues = nl.queue_get({'ifindex': cfg.ifindex}, dump=True)
if queues:
return len([q for q in queues if q['type'] == 'rx'])
return len([q for q in queues if q['type'] == qtype])
return None
def get_queues(cfg, nl) -> None:
queues = nl_get_queues(cfg, nl)
if not queues:
raise KsftSkipEx('queue-get not supported by device')
snl = NetdevFamily(recv_size=4096)
expected = sys_get_queues(cfg.dev['ifname'])
ksft_eq(queues, expected)
for qtype in ['rx', 'tx']:
queues = nl_get_queues(cfg, snl, qtype)
if not queues:
raise KsftSkipEx('queue-get not supported by device')
expected = sys_get_queues(cfg.dev['ifname'], qtype)
ksft_eq(queues, expected)
def addremove_queues(cfg, nl) -> None:
@@ -57,7 +60,7 @@ def addremove_queues(cfg, nl) -> None:
def main() -> None:
with NetDrvEnv(__file__, queue_count=3) as cfg:
with NetDrvEnv(__file__, queue_count=100) as cfg:
ksft_run([get_queues, addremove_queues], args=(cfg, NetdevFamily()))
ksft_exit()

@@ -110,6 +110,23 @@ def qstat_by_ifindex(cfg) -> None:
ksft_ge(triple[1][key], triple[0][key], comment="bad key: " + key)
ksft_ge(triple[2][key], triple[1][key], comment="bad key: " + key)
# Sanity check the dumps
queues = NetdevFamily(recv_size=4096).qstats_get({"scope": "queue"}, dump=True)
# Reformat the output into {ifindex: {rx: [id, id, ...], tx: [id, id, ...]}}
parsed = {}
for entry in queues:
ifindex = entry["ifindex"]
if ifindex not in parsed:
parsed[ifindex] = {"rx":[], "tx": []}
parsed[ifindex][entry["queue-type"]].append(entry['queue-id'])
# Now, validate
for ifindex, queues in parsed.items():
for qtype in ['rx', 'tx']:
ksft_eq(len(queues[qtype]), len(set(queues[qtype])),
comment="repeated queue keys")
ksft_eq(len(queues[qtype]), max(queues[qtype]) + 1,
comment="missing queue keys")
# Test invalid dumps
# 0 is invalid
with ksft_raises(NlError) as cm:
@@ -158,7 +175,7 @@ def check_down(cfg) -> None:
def main() -> None:
with NetDrvEnv(__file__) as cfg:
with NetDrvEnv(__file__, queue_count=100) as cfg:
ksft_run([check_pause, check_fec, pkt_byte_sum, qstat_by_ifindex,
check_down],
args=(cfg, ))

@@ -32,23 +32,23 @@ except ModuleNotFoundError as e:
# Set schema='' to avoid jsonschema validation, it's slow
#
class EthtoolFamily(YnlFamily):
def __init__(self):
def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('ethtool.yaml')).as_posix(),
schema='')
schema='', recv_size=recv_size)
class RtnlFamily(YnlFamily):
def __init__(self):
def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('rt_link.yaml')).as_posix(),
schema='')
schema='', recv_size=recv_size)
class NetdevFamily(YnlFamily):
def __init__(self):
def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('netdev.yaml')).as_posix(),
schema='')
schema='', recv_size=recv_size)
class NetshaperFamily(YnlFamily):
def __init__(self):
def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('net_shaper.yaml')).as_posix(),
schema='')
schema='', recv_size=recv_size)

@@ -171,8 +171,10 @@ ovs_add_netns_and_veths () {
ovs_add_if "$1" "$2" "$4" -u || return 1
fi
[ $TRACING -eq 1 ] && ovs_netns_spawn_daemon "$1" "$ns" \
tcpdump -i any -s 65535
if [ $TRACING -eq 1 ]; then
ovs_netns_spawn_daemon "$1" "$3" tcpdump -l -i any -s 6553
ovs_wait grep -q "listening on any" ${ovs_dir}/stderr
fi
return 0
}