Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-03-04 (ice)

This series contains updates to ice driver only.

Jake changes the driver to use relative VSI index for VF VSIs as the VF
driver has no direct use of the VSI number on ice hardware. He also
reworks some Tx/Rx functions to clarify their uses, cleans up some style
issues, and utilizes kernel helper functions.

Maciej removes a redundant call to disable Tx queues on ifdown and
removes some unnecessary devm usages.
====================
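
The __free(kfree) annotation that appears throughout the diffs below is the
scope-based cleanup helper from <linux/cleanup.h>: kfree() is attached to the
variable itself and runs automatically when the variable goes out of scope. A
minimal sketch of the pattern this series converts to (hypothetical
example_get_caps() wrapper reusing driver types; not code from the series
itself):

static int example_get_caps(struct ice_hw *hw)
{
	/* Freed automatically on every return path once the variable
	 * leaves scope; no devm_kfree()/kfree() call and no unwind
	 * label are needed for it.
	 */
	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) =
		kzalloc(sizeof(*pcaps), GFP_KERNEL);

	if (!pcaps)
		return -ENOMEM;

	return ice_aq_get_phy_caps(hw->port_info, false,
				   ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
}

Compared with devm_kzalloc(), which ties the buffer's lifetime to the device
and frees it only at driver detach, this keeps short-lived buffers
function-scoped.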

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2024-03-06 11:29:19 +00:00
commit db72b6fc8f
12 changed files with 125 additions and 201 deletions

--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1002,9 +1002,9 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
  */
 int ice_init_hw(struct ice_hw *hw)
 {
-	struct ice_aqc_get_phy_caps_data *pcaps;
+	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+	void *mac_buf __free(kfree);
 	u16 mac_buf_len;
-	void *mac_buf;
 	int status;
 
 	/* Set MAC type based on DeviceID */
@@ -1082,7 +1082,7 @@ int ice_init_hw(struct ice_hw *hw)
 	if (status)
 		goto err_unroll_sched;
 
-	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 	if (!pcaps) {
 		status = -ENOMEM;
 		goto err_unroll_sched;
@@ -1092,7 +1092,6 @@ int ice_init_hw(struct ice_hw *hw)
 	status = ice_aq_get_phy_caps(hw->port_info, false,
 				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
 				     NULL);
-	devm_kfree(ice_hw_to_dev(hw), pcaps);
 	if (status)
 		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
 			 status);
@@ -1119,18 +1118,15 @@ int ice_init_hw(struct ice_hw *hw)
 	/* Get MAC information */
 	/* A single port can report up to two (LAN and WoL) addresses */
-	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
-			       sizeof(struct ice_aqc_manage_mac_read_resp),
-			       GFP_KERNEL);
-	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
+	mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
+			  GFP_KERNEL);
 	if (!mac_buf) {
 		status = -ENOMEM;
 		goto err_unroll_fltr_mgmt_struct;
 	}
 
+	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
-	devm_kfree(ice_hw_to_dev(hw), mac_buf);
 	if (status)
 		goto err_unroll_fltr_mgmt_struct;
@@ -1399,9 +1395,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
  * it to HW register space and enables the hardware to prefetch descriptors
  * instead of only fetching them on demand
  */
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
-		  u32 rxq_index)
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+		      u32 rxq_index)
 {
 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
@@ -3277,19 +3272,14 @@ int ice_update_link_info(struct ice_port_info *pi)
 		return status;
 
 	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
-		struct ice_aqc_get_phy_caps_data *pcaps;
-		struct ice_hw *hw;
+		struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
 
-		hw = pi->hw;
-		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
-				     GFP_KERNEL);
+		pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 		if (!pcaps)
 			return -ENOMEM;
 
 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 					     pcaps, NULL);
-		devm_kfree(ice_hw_to_dev(hw), pcaps);
 	}
 
 	return status;
@@ -3430,8 +3420,8 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 int
 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
 {
+	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
-	struct ice_aqc_get_phy_caps_data *pcaps;
 	struct ice_hw *hw;
 	int status;
@@ -3441,7 +3431,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
 	*aq_failures = 0;
 	hw = pi->hw;
 
-	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 	if (!pcaps)
 		return -ENOMEM;
@@ -3493,7 +3483,6 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
 	}
 
 out:
-	devm_kfree(ice_hw_to_dev(hw), pcaps);
 	return status;
 }
@@ -3572,7 +3561,7 @@ int
 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 		enum ice_fec_mode fec)
 {
-	struct ice_aqc_get_phy_caps_data *pcaps;
+	struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
 	struct ice_hw *hw;
 	int status;
@@ -3641,8 +3630,6 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 	}
 
 out:
-	kfree(pcaps);
-
 	return status;
 }
@@ -4362,13 +4349,13 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
 /* End of FW Admin Queue command wrappers */
 
 /**
- * ice_write_byte - write a byte to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_byte - write a byte to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
  */
-static void
-ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
+			      const struct ice_ctx_ele *ce_info)
 {
 	u8 src_byte, dest_byte, mask;
 	u8 *from, *dest;
@@ -4379,14 +4366,11 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = (u8)(BIT(ce_info->width) - 1);
+	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
 
 	src_byte = *from;
-	src_byte &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
 	src_byte <<= shift_width;
+	src_byte &= mask;
 
 	/* get the current bits from the target bit string */
 	dest = dest_ctx + (ce_info->lsb / 8);
@@ -4401,13 +4385,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 }
 
 /**
- * ice_write_word - write a word to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_word - write a word to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
  */
-static void
-ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
+			      const struct ice_ctx_ele *ce_info)
 {
 	u16 src_word, mask;
 	__le16 dest_word;
@@ -4419,17 +4403,14 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = BIT(ce_info->width) - 1;
+	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
 	 */
 	src_word = *(u16 *)from;
-	src_word &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
 	src_word <<= shift_width;
+	src_word &= mask;
 
 	/* get the current bits from the target bit string */
 	dest = dest_ctx + (ce_info->lsb / 8);
@@ -4444,13 +4425,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 }
 
 /**
- * ice_write_dword - write a dword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_dword - write a dword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
  */
-static void
-ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
+			       const struct ice_ctx_ele *ce_info)
 {
 	u32 src_dword, mask;
 	__le32 dest_dword;
@@ -4462,25 +4443,14 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-
-	/* if the field width is exactly 32 on an x86 machine, then the shift
-	 * operation will not work because the SHL instructions count is masked
-	 * to 5 bits so the shift will do nothing
-	 */
-	if (ce_info->width < 32)
-		mask = BIT(ce_info->width) - 1;
-	else
-		mask = (u32)~0;
+	mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
 	 */
 	src_dword = *(u32 *)from;
-	src_dword &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
 	src_dword <<= shift_width;
+	src_dword &= mask;
 
 	/* get the current bits from the target bit string */
 	dest = dest_ctx + (ce_info->lsb / 8);
@@ -4495,13 +4465,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 }
 
 /**
- * ice_write_qword - write a qword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_qword - write a qword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
 */
-static void
-ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
+			       const struct ice_ctx_ele *ce_info)
 {
 	u64 src_qword, mask;
 	__le64 dest_qword;
@@ -4513,25 +4483,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-
-	/* if the field width is exactly 64 on an x86 machine, then the shift
-	 * operation will not work because the SHL instructions count is masked
-	 * to 6 bits so the shift will do nothing
-	 */
-	if (ce_info->width < 64)
-		mask = BIT_ULL(ce_info->width) - 1;
-	else
-		mask = (u64)~0;
+	mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
 	 */
 	src_qword = *(u64 *)from;
-	src_qword &= mask;
-
-	/* shift to correct alignment */
-	mask <<= shift_width;
 	src_qword <<= shift_width;
+	src_qword &= mask;
 
 	/* get the current bits from the target bit string */
 	dest = dest_ctx + (ce_info->lsb / 8);
@@ -4550,11 +4509,10 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
  * @hw: pointer to the hardware structure
  * @src_ctx: pointer to a generic non-packed context structure
  * @dest_ctx: pointer to memory for the packed structure
- * @ce_info: a description of the structure to be transformed
+ * @ce_info: List of Rx context elements
  */
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
-	    const struct ice_ctx_ele *ce_info)
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+		const struct ice_ctx_ele *ce_info)
 {
 	int f;
@@ -4570,16 +4528,16 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
 		}
 
 		switch (ce_info[f].size_of) {
 		case sizeof(u8):
-			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+			ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
 			break;
 		case sizeof(u16):
-			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+			ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
 			break;
 		case sizeof(u32):
-			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+			ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
 			break;
 		case sizeof(u64):
-			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+			ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
 			break;
 		default:
 			return -EINVAL;

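The ice_pack_ctx_*() rework above is one of the "kernel helper functions"
changes: GENMASK()/GENMASK_ULL() builds the field mask already shifted into
position, so the separate "mask, then shift value and mask" steps collapse,
and the special case for full-width fields disappears because GENMASK(31, 0)
and GENMASK_ULL(63, 0) are well defined, whereas a BIT(width) - 1 expression
relies on an undefined full-width shift. A standalone sketch of the dword
case (illustrative helper, not code from the driver):

static u32 pack_field_dword(u32 val, u8 width, u8 shift_width)
{
	/* The old sequence needed:
	 *	mask = (width < 32) ? BIT(width) - 1 : (u32)~0;
	 *	val &= mask;
	 *	mask <<= shift_width;
	 *	val <<= shift_width;
	 * because a shift by the full type width is undefined behavior.
	 */
	u32 mask = GENMASK(width - 1 + shift_width, shift_width);

	/* shift first, then apply the pre-shifted mask */
	return (val << shift_width) & mask;
}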
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -53,9 +53,8 @@ int ice_get_caps(struct ice_hw *hw);
 void ice_set_safe_mode_caps(struct ice_hw *hw);
 
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
-		  u32 rxq_index);
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+		      u32 rxq_index);
 
 int
 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -72,9 +71,8 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
 extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
-	    const struct ice_ctx_ele *ce_info);
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+		const struct ice_ctx_ele *ce_info);
 
 extern struct mutex ice_global_cfg_lock_sw;

--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -802,7 +802,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
 	if (!pf)
 		return -EINVAL;
 
-	data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
+	data = kzalloc(size, GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -945,11 +945,9 @@ static u64 ice_loopback_test(struct net_device *netdev)
 	int num_frames, valid_frames;
 	struct ice_tx_ring *tx_ring;
 	struct ice_rx_ring *rx_ring;
-	struct device *dev;
-	u8 *tx_frame;
+	u8 *tx_frame __free(kfree);
 	int i;
 
-	dev = ice_pf_to_dev(pf);
 	netdev_info(netdev, "loopback test\n");
 
 	test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -994,7 +992,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
 	for (i = 0; i < num_frames; i++) {
 		if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
 			ret = 8;
-			goto lbtest_free_frame;
+			goto remove_mac_filters;
 		}
 	}
@@ -1004,8 +1002,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
 	else if (valid_frames != num_frames)
 		ret = 10;
 
-lbtest_free_frame:
-	devm_kfree(dev, tx_frame);
 remove_mac_filters:
 	if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
 		netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");

--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2719,61 +2719,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 	}
 }
 
-/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	struct ice_hw *hw = &pf->hw;
-	u32 val;
-	int i;
-
-	/* disable interrupt causation from each queue */
-	if (vsi->tx_rings) {
-		ice_for_each_txq(vsi, i) {
-			if (vsi->tx_rings[i]) {
-				u16 reg;
-
-				reg = vsi->tx_rings[i]->reg_idx;
-				val = rd32(hw, QINT_TQCTL(reg));
-				val &= ~QINT_TQCTL_CAUSE_ENA_M;
-				wr32(hw, QINT_TQCTL(reg), val);
-			}
-		}
-	}
-
-	if (vsi->rx_rings) {
-		ice_for_each_rxq(vsi, i) {
-			if (vsi->rx_rings[i]) {
-				u16 reg;
-
-				reg = vsi->rx_rings[i]->reg_idx;
-				val = rd32(hw, QINT_RQCTL(reg));
-				val &= ~QINT_RQCTL_CAUSE_ENA_M;
-				wr32(hw, QINT_RQCTL(reg), val);
-			}
-		}
-	}
-
-	/* disable each interrupt */
-	ice_for_each_q_vector(vsi, i) {
-		if (!vsi->q_vectors[i])
-			continue;
-		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
-	}
-
-	ice_flush(hw);
-
-	/* don't call synchronize_irq() for VF's from the host */
-	if (vsi->type == ICE_VSI_VF)
-		return;
-
-	ice_for_each_q_vector(vsi, i)
-		synchronize_irq(vsi->q_vectors[i]->irq.virq);
-}
-
 /**
  * __ice_queue_set_napi - Set the napi instance for the queue
  * @dev: device to which NAPI and queue belong

--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -110,8 +110,6 @@ void
 ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
 			bool ena_ts);
 
-void ice_vsi_dis_irq(struct ice_vsi *vsi);
-
 void ice_vsi_free_irq(struct ice_vsi *vsi);
 
 void ice_vsi_free_rx_rings(struct ice_vsi *vsi);

--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7001,6 +7001,50 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
 	}
 }
 
+/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+static void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	struct ice_hw *hw = &pf->hw;
+	u32 val;
+	int i;
+
+	/* disable interrupt causation from each Rx queue; Tx queues are
+	 * handled in ice_vsi_stop_tx_ring()
+	 */
+	if (vsi->rx_rings) {
+		ice_for_each_rxq(vsi, i) {
+			if (vsi->rx_rings[i]) {
+				u16 reg;
+
+				reg = vsi->rx_rings[i]->reg_idx;
+				val = rd32(hw, QINT_RQCTL(reg));
+				val &= ~QINT_RQCTL_CAUSE_ENA_M;
+				wr32(hw, QINT_RQCTL(reg), val);
+			}
+		}
+	}
+
+	/* disable each interrupt */
+	ice_for_each_q_vector(vsi, i) {
+		if (!vsi->q_vectors[i])
+			continue;
+		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+	}
+
+	ice_flush(hw);
+
+	/* don't call synchronize_irq() for VF's from the host */
+	if (vsi->type == ICE_VSI_VF)
+		return;
+
+	ice_for_each_q_vector(vsi, i)
+		synchronize_irq(vsi->q_vectors[i]->irq.virq);
+}
+
 /**
  * ice_down - Shutdown the connection
  * @vsi: The VSI being stopped

--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -240,7 +240,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
 	}
 
 	vf->lan_vsi_idx = vsi->idx;
-	vf->lan_vsi_num = vsi->vsi_num;
 
 	return vsi;
 }

--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -280,12 +280,6 @@ int ice_vf_reconfig_vsi(struct ice_vf *vf)
 		return err;
 	}
 
-	/* Update the lan_vsi_num field since it might have been changed. The
-	 * PF lan_vsi_idx number remains the same so we don't need to change
-	 * that.
-	 */
-	vf->lan_vsi_num = vsi->vsi_num;
-
 	return 0;
 }
@@ -315,7 +309,6 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
 	 * vf->lan_vsi_idx
 	 */
 	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
-	vf->lan_vsi_num = vsi->vsi_num;
 
 	return 0;
 }
@@ -1315,13 +1308,12 @@ int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
 }
 
 /**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
  * @vf: VF to remove access to VSI for
  */
 void ice_vf_invalidate_vsi(struct ice_vf *vf)
 {
 	vf->lan_vsi_idx = ICE_NO_VSI;
-	vf->lan_vsi_num = ICE_NO_VSI;
 }
 
 /**

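The lan_vsi_num removals above rest on the distinction between the software
VSI index and the firmware VSI number: vf->lan_vsi_idx indexes the PF's own
pf->vsi[] array and stays stable, while vsi->vsi_num is assigned by hardware
and can change across a VSI rebuild, which is what made the cached copy
error-prone. A sketch of how the VF's VSI is resolved from the index alone
(modeled on the driver's existing ice_get_vf_vsi() helper; simplified here):

static struct ice_vsi *example_vf_vsi(struct ice_vf *vf)
{
	/* ICE_NO_VSI marks an invalidated index, see
	 * ice_vf_invalidate_vsi() above.
	 */
	if (vf->lan_vsi_idx == ICE_NO_VSI)
		return NULL;

	return vf->pf->vsi[vf->lan_vsi_idx];	/* relative/software index */
}

Anything that genuinely needs the hardware number can still derive it on
demand, as ice_vf_rebuild_vsi() above does via ice_get_hw_vsi_num().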
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -109,11 +109,6 @@ struct ice_vf {
 	u8 spoofchk:1;
 	u8 link_forced:1;
 	u8 link_up:1;			/* only valid if VF link is forced */
-	/* VSI indices - actual VSI pointers are maintained in the PF structure
-	 * When assigned, these will be non-zero, because VSI 0 is always
-	 * the main LAN VSI for the PF.
-	 */
-	u16 lan_vsi_num;		/* ID as used by firmware */
 	unsigned int min_tx_rate;	/* Minimum Tx bandwidth limit in Mbps */
 	unsigned int max_tx_rate;	/* Maximum Tx bandwidth limit in Mbps */
 	DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS);	/* VF runtime states */

--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -506,7 +506,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
 	vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
 	vfres->max_mtu = ice_vc_get_max_frame_size(vf);
 
-	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+	vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
 	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
 	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
 	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
@@ -552,27 +552,20 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
  */
 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
 {
-	struct ice_pf *pf = vf->pf;
-	struct ice_vsi *vsi;
-
-	vsi = ice_find_vsi(pf, vsi_id);
-
-	return (vsi && (vsi->vf == vf));
+	return vsi_id == ICE_VF_VSI_ID;
 }
 
 /**
  * ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
+ * @vsi: VSI to check queue ID against
  * @qid: VSI relative queue ID
  *
  * check for the valid queue ID
  */
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
 {
-	struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
 	/* allocated Tx and Rx queues should be always equal for VF VSI */
-	return (vsi && (qid < vsi->alloc_txq));
+	return qid < vsi->alloc_txq;
 }
 
 /**
@@ -1330,7 +1323,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
 	 */
 	q_map = vqs->rx_queues;
 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 			goto error_param;
 		}
@@ -1352,7 +1345,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
 
 	q_map = vqs->tx_queues;
 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 			goto error_param;
 		}
@@ -1457,7 +1450,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 
 	q_map = vqs->tx_queues;
 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+		if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 			goto error_param;
 		}
@@ -1483,7 +1476,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
 	} else if (q_map) {
 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
-			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+			if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;
 			}
@@ -1539,7 +1532,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
 		for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
 			vsi_q_id = vsi_q_id_idx;
 
-			if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+			if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
 				return VIRTCHNL_STATUS_ERR_PARAM;
 
 			q_vector->num_ring_rx++;
@@ -1553,7 +1546,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
 		for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
 			vsi_q_id = vsi_q_id_idx;
 
-			if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+			if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
 				return VIRTCHNL_STATUS_ERR_PARAM;
 
 			q_vector->num_ring_tx++;
@@ -1710,7 +1703,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		    qpi->txq.headwb_enabled ||
 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
-		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+		    !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
 			goto error_param;
 		}

--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -19,6 +19,15 @@
 #define ICE_MAX_MACADDR_PER_VF		18
 #define ICE_FLEX_DESC_RXDID_MAX_NUM	64
 
+/* VFs only get a single VSI. For ice hardware, the VF does not need to know
+ * its VSI index. However, the virtchnl interface requires a VSI number,
+ * mainly due to legacy hardware.
+ *
+ * Since the VF doesn't need this information, report a static value to the VF
+ * instead of leaking any information about the PF or hardware setup.
+ */
+#define ICE_VF_VSI_ID	1
+
 struct ice_virtchnl_ops {
 	int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
 	int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);

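With the constant in place, every virtchnl message that carries a VSI number
can be validated without any VSI lookup; condensed from the ice_virtchnl.c
hunks above for clarity:

bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
	/* The VF was handed ICE_VF_VSI_ID in VIRTCHNL_OP_GET_VF_RESOURCES,
	 * so that is the only value it may legitimately echo back.
	 */
	return vsi_id == ICE_VF_VSI_ID;
}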
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -94,9 +94,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
 		return -EINVAL;
 
-	if (vsi_id != vf->lan_vsi_num)
-		return -EINVAL;
-
 	if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
 		return -EINVAL;