Merge branch 'i2c/for-current' into i2c/for-next

Wolfram Sang 2025-01-12 13:09:37 +01:00
commit 22dbf71480
191 changed files with 1864 additions and 948 deletions


@ -435,7 +435,7 @@ Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
Mathieu Othacehe <m.othacehe@gmail.com> <othacehe@gnu.org>
Mathieu Othacehe <othacehe@gnu.org> <m.othacehe@gmail.com>
Mat Martineau <martineau@kernel.org> <mathew.j.martineau@linux.intel.com>
Mat Martineau <martineau@kernel.org> <mathewm@codeaurora.org>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>


@ -445,8 +445,10 @@ event code Key Notes
0x1008 0x07 FN+F8 IBM: toggle screen expand
Lenovo: configure UltraNav,
or toggle screen expand.
On newer platforms (2024+)
replaced by 0x131f (see below)
On 2024 platforms replaced by
0x131f (see below) and on newer
platforms (2025+) the keycode is
replaced by 0x1401 (see below).
0x1009 0x08 FN+F9 -
@ -506,9 +508,11 @@ event code Key Notes
0x1019 0x18 unknown
0x131f ... FN+F8 Platform Mode change.
0x131f ... FN+F8 Platform Mode change (2024 systems).
Implemented in driver.
0x1401 ... FN+F8 Platform Mode change (2025+ systems).
Implemented in driver.
... ... ...
0x1020 0x1F unknown


@ -436,7 +436,7 @@ AnonHugePmdMapped).
The number of file transparent huge pages mapped to userspace is available
by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``.
To identify what applications are mapping file transparent huge pages, it
is necessary to read ``/proc/PID/smaps`` and count the FileHugeMapped fields
is necessary to read ``/proc/PID/smaps`` and count the FilePmdMapped fields
for each mapping.
Note that reading the smaps file is expensive and reading it

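As a side note for readers of this doc change: summing FilePmdMapped across a process's mappings is easy to script. A minimal userspace sketch, assuming only the /proc layout the documentation describes (not part of the commit):

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	unsigned long kb, total = 0;
	FILE *f;

	/* default to the calling process when no PID is given */
	snprintf(path, sizeof(path), "/proc/%s/smaps", argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f)
		return 1;
	/* one FilePmdMapped line is emitted per mapping; sum them all */
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "FilePmdMapped: %lu kB", &kb) == 1)
			total += kb;
	fclose(f);
	printf("FilePmdMapped total: %lu kB\n", total);
	return 0;
}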

@ -90,7 +90,7 @@ properties:
adi,dsi-lanes:
description: Number of DSI data lanes connected to the DSI host.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 1, 2, 3, 4 ]
enum: [ 2, 3, 4 ]
"#sound-dai-cells":
const: 0


@ -22,65 +22,67 @@ definitions:
doc: unused event
-
name: created
doc:
token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
doc: >-
A new MPTCP connection has been created. This is a good time to
allocate memory and send ADD_ADDR if needed. Depending on the
traffic patterns, it can take a long time until the
MPTCP_EVENT_ESTABLISHED is sent.
Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
dport, server-side.
-
name: established
doc:
token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
doc: >-
An MPTCP connection is established (can start new subflows).
Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
dport, server-side.
-
name: closed
doc:
token
doc: >-
An MPTCP connection has stopped.
Attribute: token.
-
name: announced
value: 6
doc:
token, rem_id, family, daddr4 | daddr6 [, dport]
doc: >-
A new address has been announced by the peer.
Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
-
name: removed
doc:
token, rem_id
doc: >-
An address has been lost by the peer.
Attributes: token, rem_id.
-
name: sub-established
value: 10
doc:
token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
dport, backup, if_idx [, error]
doc: >-
A new subflow has been established. 'error' should not be set.
Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
daddr6, sport, dport, backup, if_idx [, error].
-
name: sub-closed
doc:
token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
dport, backup, if_idx [, error]
doc: >-
A subflow has been closed. An error (copy of sk_err) could be set if an
error has been detected for this subflow.
Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
daddr6, sport, dport, backup, if_idx [, error].
-
name: sub-priority
value: 13
doc:
token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
dport, backup, if_idx [, error]
doc: >-
The priority of a subflow has changed. 'error' should not be set.
Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
daddr6, sport, dport, backup, if_idx [, error].
-
name: listener-created
value: 15
doc:
family, sport, saddr4 | saddr6
doc: >-
A new PM listener is created.
Attributes: family, sport, saddr4 | saddr6.
-
name: listener-closed
doc:
family, sport, saddr4 | saddr6
doc: >-
A PM listener is closed.
Attributes: family, sport, saddr4 | saddr6.
attribute-sets:
-
@ -306,8 +308,8 @@ operations:
attributes:
- addr
-
name: flush-addrs
doc: flush addresses
name: flush-addrs
doc: Flush addresses
attribute-set: endpoint
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
@ -351,7 +353,7 @@ operations:
- addr-remote
-
name: announce
doc: announce new sf
doc: Announce new address
attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
@ -362,7 +364,7 @@ operations:
- token
-
name: remove
doc: announce removal
doc: Announce removal
attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
@ -373,7 +375,7 @@ operations:
- loc-id
-
name: subflow-create
doc: todo
doc: Create subflow
attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
@ -385,7 +387,7 @@ operations:
- addr-remote
-
name: subflow-destroy
doc: todo
doc: Destroy subflow
attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]


@ -1797,7 +1797,6 @@ F: include/uapi/linux/if_arcnet.h
ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
M: Arnd Bergmann <arnd@arndb.de>
M: Olof Johansson <olof@lixom.net>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: soc@lists.linux.dev
S: Maintained
@ -3608,6 +3607,7 @@ F: drivers/phy/qualcomm/phy-ath79-usb.c
ATHEROS ATH GENERIC UTILITIES
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*
@ -14756,7 +14756,7 @@ F: drivers/memory/mtk-smi.c
F: include/soc/mediatek/smi.h
MEDIATEK SWITCH DRIVER
M: Arınç ÜNAL <arinc.unal@arinc9.com>
M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Daniel Golle <daniel@makrotopia.org>
M: DENG Qingfang <dqfext@gmail.com>
M: Sean Wang <sean.wang@mediatek.com>
@ -18460,7 +18460,7 @@ F: Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
F: drivers/pinctrl/mediatek/
PIN CONTROLLER - MEDIATEK MIPS
M: Arınç ÜNAL <arinc.unal@arinc9.com>
M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Sergio Paracuellos <sergio.paracuellos@gmail.com>
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
L: linux-mips@vger.kernel.org
@ -19504,7 +19504,7 @@ S: Maintained
F: arch/mips/ralink
RALINK MT7621 MIPS ARCHITECTURE
M: Arınç ÜNAL <arinc.unal@arinc9.com>
M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Sergio Paracuellos <sergio.paracuellos@gmail.com>
L: linux-mips@vger.kernel.org
S: Maintained
@ -20907,6 +20907,8 @@ F: kernel/sched/
SCHEDULER - SCHED_EXT
R: Tejun Heo <tj@kernel.org>
R: David Vernet <void@manifault.com>
R: Andrea Righi <arighi@nvidia.com>
R: Changwoo Min <changwoo@igalia.com>
L: linux-kernel@vger.kernel.org
S: Maintained
W: https://github.com/sched-ext/scx


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -6,6 +6,7 @@ menuconfig ARCH_MXC
select CLKSRC_IMX_GPT
select GENERIC_IRQ_CHIP
select GPIOLIB
select PINCTRL
select PM_OPP if PM
select SOC_BUS
select SRAM


@ -143,11 +143,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
" DIV:\t\t%s\n"
" BMX:\t\t%s\n"
" CDX:\t\t%s\n",
cpuinfo.has_mul ? "yes" : "no",
cpuinfo.has_mulx ? "yes" : "no",
cpuinfo.has_div ? "yes" : "no",
cpuinfo.has_bmx ? "yes" : "no",
cpuinfo.has_cdx ? "yes" : "no");
str_yes_no(cpuinfo.has_mul),
str_yes_no(cpuinfo.has_mulx),
str_yes_no(cpuinfo.has_div),
str_yes_no(cpuinfo.has_bmx),
str_yes_no(cpuinfo.has_cdx));
seq_printf(m,
"Icache:\t\t%ukB, line length: %u\n",

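The nios2 cpuinfo hunk above swaps open-coded ternaries for str_yes_no(). For readers unfamiliar with the helper: in current kernels it lives in <linux/string_choices.h> and behaves like this restated sketch:

#include <linux/types.h>

/* Equivalent of the kernel's str_yes_no(), shown for illustration only. */
static inline const char *example_yes_no(bool v)
{
	return v ? "yes" : "no";
}

Using the shared helper keeps format arguments free of repeated ternaries and lets identical "yes"/"no" string literals be merged.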

@ -1106,7 +1106,7 @@ int open_for_data(struct cdrom_device_info *cdi)
}
}
cd_dbg(CD_OPEN, "all seems well, opening the devicen");
cd_dbg(CD_OPEN, "all seems well, opening the device\n");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */


@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
#else /* !CONFIG_RESET_CONTROLLER */
static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
struct clk_imx8mp_audiomix_priv *priv)
{
return 0;
}


@ -779,6 +779,13 @@ static struct ccu_div dpu1_clk = {
},
};
static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
&video_pll_clk.common.hw, 4, 1, 0);
static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
{ .hw = &emmc_sdio_ref_clk.hw },
};
static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
@ -798,7 +805,7 @@ static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", p
0x150, BIT(12), 0);
static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", video_pll_clk_pd, 0x204, BIT(30), 0);
static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
@ -1059,6 +1066,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
return ret;
priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
if (ret)
return ret;
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
return ret;


@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
ADV7511_AUDIO_CFG3_LEN_MASK, len);
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
regmap_write(adv7511->regmap, 0x73, 0x1);
/* send current Audio infoframe values while updating */
regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), BIT(5));
regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
/* use Audio infoframe updated info */
regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), 0);
return 0;
}
@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
/* use Audio infoframe updated info */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), 0);
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,


@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_client *i2c)
return ret;
ret = adv7511_init_regulators(adv7511);
if (ret)
return dev_err_probe(dev, ret, "failed to init regulators\n");
if (ret) {
dev_err_probe(dev, ret, "failed to init regulators\n");
goto err_of_node_put;
}
/*
* The power down GPIO is optional. If present, toggle it from active to
@ -1363,6 +1365,8 @@ err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
adv7511_uninit_regulators(adv7511);
err_of_node_put:
of_node_put(adv7511->host_node);
return ret;
}
@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
of_node_put(adv7511->host_node);
adv7511_uninit_regulators(adv7511);
drm_bridge_remove(&adv7511->bridge);

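The adv7511 probe fix above is an instance of a general rule: "return dev_err_probe(...)" is only safe while the probe path owns nothing that needs manual cleanup; afterwards the error must be logged and routed through the unwind labels. A hedged sketch of the pattern (all example_* names are illustrative, not the driver's):

#include <linux/device.h>

/* hypothetical helpers standing in for real resource acquisition */
int example_acquire_a(struct device *dev);
int example_acquire_b(struct device *dev);
void example_release_a(struct device *dev);

static int example_probe(struct device *dev)
{
	int ret;

	ret = example_acquire_a(dev);
	if (ret)
		return dev_err_probe(dev, ret, "failed to acquire A\n");

	ret = example_acquire_b(dev);
	if (ret) {
		/* log the error, but release A before returning */
		dev_err_probe(dev, ret, "failed to acquire B\n");
		goto err_release_a;
	}

	return 0;

err_release_a:
	example_release_a(dev);
	return ret;
}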

@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
if (num_lanes < 1 || num_lanes > 4)
if (num_lanes < 2 || num_lanes > 4)
return -EINVAL;
adv->num_dsi_lanes = num_lanes;
@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
if (!adv->host_node)
return -ENODEV;
of_node_put(adv->host_node);
adv->use_timing_gen = !of_property_read_bool(np,
"adi,disable-timing-generator");


@ -2115,14 +2115,6 @@ static void intel_c10_pll_program(struct intel_display *display,
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
/* Custom width needs to be programmed to 0 for both the phy lanes */
intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
MB_WRITE_COMMITTED);
intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
@ -2132,6 +2124,10 @@ static void intel_c10_pll_program(struct intel_display *display,
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
/* Custom width needs to be programmed to 0 for both the phy lanes */
intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
MB_WRITE_COMMITTED);
intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);


@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12) {
if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)
if (HAS_ENGINE(gt, _VCS(i)))
pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |


@ -724,7 +724,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
new_mem->mem_type == XE_PL_SYSTEM) {
long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP,
true,
false,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
ret = timeout;
@ -848,8 +848,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
out:
if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
ttm_bo->ttm)
ttm_bo->ttm) {
long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
DMA_RESV_USAGE_KERNEL,
false,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
ret = timeout;
xe_tt_unmap_sg(ttm_bo->ttm);
}
return ret;
}

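For context on the xe_bo hunks above: dma_resv_wait_timeout() takes a usage class, an interruptible flag, and a timeout, and returns a negative error code if the wait itself fails, so the result can be forwarded as the caller's own error. A condensed, hedged restatement of the wait-before-teardown pattern the fix adds (the real code sits inside xe_bo_move() and then unmaps the SG table):

#include <linux/dma-resv.h>
#include <linux/sched.h>

static long wait_kernel_fences(struct dma_resv *resv)
{
	long timeout = dma_resv_wait_timeout(resv,
					     DMA_RESV_USAGE_KERNEL,
					     false,	/* uninterruptible */
					     MAX_SCHEDULE_TIMEOUT);

	/* negative: the wait failed and should be propagated;
	 * zero or positive: all kernel fences signaled, teardown is safe */
	return timeout < 0 ? timeout : 0;
}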

@ -109,7 +109,11 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_puts(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(ss->guc.ct, &p);
drm_puts(&p, "\n**** Contexts ****\n");
/*
* Don't add a new section header here because the mesa debug decoder
* tool expects the context information to be in the 'GuC CT' section.
*/
/* drm_puts(&p, "\n**** Contexts ****\n"); */
xe_guc_exec_queue_snapshot_print(ss->ge, &p);
drm_puts(&p, "\n**** Job ****\n");
@ -363,6 +367,15 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
char buff[ASCII85_BUFSZ], *line_buff;
size_t line_pos = 0;
/*
* Splitting blobs across multiple lines is not compatible with the mesa
* debug decoder tool. Note that even dropping the explicit '\n' below
* doesn't help because the GuC log is so big that some underlying
* implementation still splits the lines at 512K characters. So just
* bail completely for the moment.
*/
return;
#define DMESG_MAX_LINE_LEN 800
#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */


@ -8,6 +8,7 @@
#include <linux/nospec.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>
@ -762,9 +763,11 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
*/
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
int idx;
/*
* Jobs that are run during driver load may use an exec_queue, but are
@ -774,6 +777,10 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
if (!q->vm || !q->vm->xef)
return;
/* Synchronize with unbind while holding the xe file open */
if (!drm_dev_enter(&xe->drm, &idx))
return;
xef = q->vm->xef;
/*
@ -787,6 +794,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
drm_dev_exit(idx);
}
/**

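The xe_exec_queue hunk above wraps the run-ticks update in drm_dev_enter()/drm_dev_exit(), the standard DRM guard against touching device state after hot-unplug. A minimal sketch of the idiom (the function body is illustrative):

#include <drm/drm_drv.h>

static void touch_device_state(struct drm_device *drm)
{
	int idx;

	/* Returns false once the device is unplugged; the critical
	 * section is SRCU-tracked so unplug can drain it. */
	if (!drm_dev_enter(drm, &idx))
		return;

	/* ... safely access device state here ... */

	drm_dev_exit(idx);
}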

@ -2046,7 +2046,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_any = valid_any || (valid_ggtt && is_primary);
if (IS_DGFX(xe)) {
bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
valid_all = valid_all && valid_lmem;


@ -74,12 +74,6 @@ struct xe_oa_config {
struct rcu_head rcu;
};
struct flex {
struct xe_reg reg;
u32 offset;
u32 value;
};
struct xe_oa_open_param {
struct xe_file *xef;
u32 oa_unit_id;
@ -596,19 +590,38 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
return ret;
}
static void xe_oa_lock_vma(struct xe_exec_queue *q)
{
if (q->vm) {
down_read(&q->vm->lock);
xe_vm_lock(q->vm, false);
}
}
static void xe_oa_unlock_vma(struct xe_exec_queue *q)
{
if (q->vm) {
xe_vm_unlock(q->vm);
up_read(&q->vm->lock);
}
}
static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
struct xe_bb *bb)
{
struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
struct xe_sched_job *job;
struct dma_fence *fence;
int err = 0;
/* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
job = xe_bb_create_job(stream->k_exec_q, bb);
xe_oa_lock_vma(q);
job = xe_bb_create_job(q, bb);
if (IS_ERR(job)) {
err = PTR_ERR(job);
goto exit;
}
job->ggtt = true;
if (deps == XE_OA_SUBMIT_ADD_DEPS) {
for (int i = 0; i < stream->num_syncs && !err; i++)
@ -623,10 +636,13 @@ static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
xe_oa_unlock_vma(q);
return fence;
err_put_job:
xe_sched_job_put(job);
exit:
xe_oa_unlock_vma(q);
return ERR_PTR(err);
}
@ -675,63 +691,19 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
dma_fence_put(stream->last_fence);
}
static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
struct xe_bb *bb, const struct flex *flex, u32 count)
{
u32 offset = xe_bo_ggtt_addr(lrc->bo);
do {
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
bb->cs[bb->len++] = 0;
bb->cs[bb->len++] = flex->value;
} while (flex++, --count);
}
static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
const struct flex *flex, u32 count)
static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
{
struct dma_fence *fence;
struct xe_bb *bb;
int err;
bb = xe_bb_new(stream->gt, 4 * count, false);
bb = xe_bb_new(stream->gt, 2 * count + 1, false);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
goto exit;
}
xe_oa_store_flex(stream, lrc, bb, flex, count);
fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
goto free_bb;
}
xe_bb_free(bb, fence);
dma_fence_put(fence);
return 0;
free_bb:
xe_bb_free(bb, NULL);
exit:
return err;
}
static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
{
struct dma_fence *fence;
struct xe_bb *bb;
int err;
bb = xe_bb_new(stream->gt, 3, false);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
goto exit;
}
write_cs_mi_lri(bb, reg_lri, 1);
write_cs_mi_lri(bb, reg_lri, count);
fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
if (IS_ERR(fence)) {
@ -751,71 +723,55 @@ exit:
static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
struct xe_lrc *lrc = stream->exec_q->lrc[0];
u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
struct flex regs_context[] = {
struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
OAR_OACONTROL,
oacontrol,
},
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
regs_offset + CTX_CONTEXT_CONTROL,
_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
},
};
struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
int err;
/* Modify stream hwe context image with regs_context */
err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
regs_context, ARRAY_SIZE(regs_context));
if (err)
return err;
/* Apply reg_lri using LRI */
return xe_oa_load_with_lri(stream, &reg_lri);
return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
struct xe_lrc *lrc = stream->exec_q->lrc[0];
u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
struct flex regs_context[] = {
struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
OAC_OACONTROL,
oacontrol
},
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
regs_offset + CTX_CONTEXT_CONTROL,
_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
},
};
struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
int err;
/* Set ccs select to enable programming of OAC_OACONTROL */
xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
__oa_ccs_select(stream));
/* Modify stream hwe context image with regs_context */
err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
regs_context, ARRAY_SIZE(regs_context));
if (err)
return err;
/* Apply reg_lri using LRI */
return xe_oa_load_with_lri(stream, &reg_lri);
return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
@ -2066,8 +2022,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
return -ENOENT;
if (param.exec_q->width > 1)
drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
return -EOPNOTSUPP;
}
/*


@ -221,7 +221,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
return job->q->vm ? BIT(8) : 0;
if (job->q->vm && !job->ggtt)
return BIT(8);
return 0;
}
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)


@ -56,6 +56,8 @@ struct xe_sched_job {
u32 migrate_flush_flags;
/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
bool ring_ops_flush_tlb;
/** @ggtt: mapped in ggtt. */
bool ggtt;
/** @ptrs: per instance pointers. */
struct xe_job_ptrs ptrs[];
};


@ -412,7 +412,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
dev_name(dev), ret);
break;
case BUS_NOTIFY_DEL_DEVICE:
case BUS_NOTIFY_REMOVED_DEVICE:
i2c_atr_detach_client(client->adapter, client);
break;


@ -1558,6 +1558,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
res = device_add(&adap->dev);
if (res) {
pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
put_device(&adap->dev);
goto out_list;
}

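The i2c-core fix above follows the driver-model rule that once device_initialize() has run, a failed device_add() must be answered with put_device(), never a bare kfree(), so the release callback performs the actual teardown. Sketched generically (struct names are illustrative):

#include <linux/device.h>

struct foo {
	struct device dev;
	/* ... */
};

static int foo_register(struct foo *foo)
{
	int ret;

	device_initialize(&foo->dev);	/* refcount is now live */
	/* ... set name, bus, release callback ... */

	ret = device_add(&foo->dev);
	if (ret) {
		/* drop the initial reference; ->release() frees foo */
		put_device(&foo->dev);
		return ret;
	}
	return 0;
}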

@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
int bound_if_index = dev_addr->bound_dev_if;
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
struct net_device *pdev = NULL;
if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
goto out;
@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
rcu_read_lock();
ndev = rcu_dereference(sgid_attr->ndev);
if (ndev->ifindex != bound_if_index) {
pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
if (pdev) {
if (is_vlan_dev(pdev)) {
pdev = vlan_dev_real_dev(pdev);
if (ndev->ifindex == pdev->ifindex)
bound_if_index = pdev->ifindex;
}
if (is_vlan_dev(ndev)) {
pdev = vlan_dev_real_dev(ndev);
if (bound_if_index == pdev->ifindex)
bound_if_index = ndev->ifindex;
}
}
}
if (!net_eq(dev_net(ndev), dev_addr->net) ||
ndev->ifindex != bound_if_index) {
rdma_put_gid_attr(sgid_attr);

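The cma_validate_port() change above accepts a bound ifindex when either side of a VLAN upper/lower pair matches. The two helpers it leans on come from <linux/if_vlan.h>; a small hedged sketch of how they resolve such a pair (not the CMA code itself):

#include <linux/if_vlan.h>

/* Map a possibly-VLAN netdev to the real device it sits on. */
static struct net_device *lower_dev_of(struct net_device *dev)
{
	return is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
}

/* Two netdevs "match" if either one is a VLAN on top of the other. */
static bool same_or_vlan_of(struct net_device *a, struct net_device *b)
{
	return lower_dev_of(a) == b || lower_dev_of(b) == a;
}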

@ -2833,8 +2833,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
enum rdma_nl_notify_event_type type)
{
struct sk_buff *skb;
int ret = -EMSGSIZE;
struct net *net;
int ret = 0;
void *nlh;
net = read_pnet(&device->coredev.rdma_net);


@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
{
const void __user *res = iter->cur;
if (iter->cur + len > iter->end)
if (len > iter->end - iter->cur)
return (void __force __user *)ERR_PTR(-ENOSPC);
iter->cur += len;
return res;
@ -2008,11 +2008,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
if (ret)
return ret;
wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
cmd.wr_count));
if (IS_ERR(wqes))
return PTR_ERR(wqes);
sgls = uverbs_request_next_ptr(
&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
sgls = uverbs_request_next_ptr(&iter,
size_mul(cmd.sge_count,
sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return PTR_ERR(sgls);
ret = uverbs_request_finish(&iter);
@ -2198,11 +2200,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
return ERR_PTR(-EINVAL);
wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
if (IS_ERR(wqes))
return ERR_CAST(wqes);
sgls = uverbs_request_next_ptr(
iter, sge_count * sizeof(struct ib_uverbs_sge));
sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return ERR_CAST(sgls);
ret = uverbs_request_finish(iter);

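The uverbs fix above replaces open-coded multiplications such as cmd.wqe_size * cmd.wr_count with size_mul() from <linux/overflow.h>, which saturates at SIZE_MAX instead of wrapping, so an attacker-chosen count can no longer yield a small bogus length. Note the companion change to the bounds check itself: comparing len against end - cur avoids overflowing the pointer addition cur + len. A condensed sketch of the combined idiom:

#include <linux/overflow.h>
#include <linux/types.h>

static bool request_fits(const void __user *cur, const void __user *end,
			 u32 wqe_size, u32 wr_count)
{
	/* saturates to SIZE_MAX on overflow, guaranteeing the check fails */
	size_t len = size_mul(wqe_size, wr_count);

	/* subtract on the known-ordered side instead of adding to 'cur' */
	return len <= (size_t)(end - cur);
}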

@ -199,7 +199,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
ib_attr->hw_ver = rdev->en_dev->pdev->revision;
ib_attr->max_qp = dev_attr->max_qp;
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
ib_attr->device_cap_flags =
@ -967,13 +967,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
bnxt_re_debug_rem_qpinfo(rdev, qp);
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
if (rc)
ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
return rc;
}
if (rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
@ -983,11 +983,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
rc = bnxt_re_destroy_gsi_sqp(qp);
if (rc)
return rc;
}
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
bnxt_re_destroy_gsi_sqp(qp);
mutex_lock(&rdev->qp_lock);
list_del(&qp->list);
@ -998,8 +995,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);
bnxt_re_debug_rem_qpinfo(rdev, qp);
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@ -2167,18 +2162,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
}
if (qp_attr_mask & IB_QP_PATH_MTU) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
} else if (qp_attr->qp_state == IB_QPS_RTR) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu =
__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
qp->qplib_qp.mtu =
ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
if (qp_attr->qp_state == IB_QPS_RTR) {
enum ib_mtu qpmtu;
qpmtu = iboe_get_mtu(rdev->netdev->mtu);
if (qp_attr_mask & IB_QP_PATH_MTU) {
if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
ib_mtu_enum_to_int(qpmtu))
return -EINVAL;
qpmtu = qp_attr->path_mtu;
}
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
}
if (qp_attr_mask & IB_QP_TIMEOUT) {
@ -2328,6 +2325,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = qplib_qp->retry_cnt;
qp_attr->rnr_retry = qplib_qp->rnr_retry;
qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
qp_attr->rq_psn = qplib_qp->rq.psn;
qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
qp_attr->sq_psn = qplib_qp->sq.psn;
@ -2824,7 +2822,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
@ -2936,7 +2935,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;


@ -268,6 +268,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
static inline u32 __to_ib_port_num(u16 port_id)
{
return (u32)port_id + 1;
}
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);


@ -1715,11 +1715,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
int mask = IB_QP_STATE;
struct ib_qp_attr qp_attr;
struct bnxt_re_qp *qp;
qp_attr.qp_state = IB_QPS_ERR;
mutex_lock(&rdev->qp_lock);
list_for_each_entry(qp, &rdev->qp_list, list) {
/* Modify the state of all QPs except QP1/Shadow QP */
@ -1727,12 +1724,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
if (qp->qplib_qp.state !=
CMDQ_MODIFY_QP_NEW_STATE_RESET &&
qp->qplib_qp.state !=
CMDQ_MODIFY_QP_NEW_STATE_ERR) {
CMDQ_MODIFY_QP_NEW_STATE_ERR)
bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
1, IB_EVENT_QP_FATAL);
bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
NULL);
}
}
}
mutex_unlock(&rdev->qp_lock);


@ -659,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
return rc;
srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
GFP_KERNEL);
if (!srq->swq) {
rc = -ENOMEM;
goto fail;
}
srq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_SRQ,
@ -694,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
spin_lock_init(&srq->lock);
srq->start_idx = 0;
srq->last_idx = srq->hwq.max_elements - 1;
for (idx = 0; idx < srq->hwq.max_elements; idx++)
srq->swq[idx].next_idx = idx + 1;
srq->swq[srq->last_idx].next_idx = -1;
if (!srq->hwq.is_user) {
srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
GFP_KERNEL);
if (!srq->swq) {
rc = -ENOMEM;
goto fail;
}
for (idx = 0; idx < srq->hwq.max_elements; idx++)
srq->swq[idx].next_idx = idx + 1;
srq->swq[srq->last_idx].next_idx = -1;
}
srq->id = le32_to_cpu(resp.xid);
srq->dbinfo.hwq = &srq->hwq;
@ -1000,9 +1001,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 tbl_indx;
u16 nsge;
if (res->dattr)
qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
sq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_QP,
@ -1034,7 +1033,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
: 0;
/* Update msn tbl size */
if (qp->is_host_msn_tbl && psn_sz) {
hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
hwq_attr.aux_depth =
roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
else
hwq_attr.aux_depth =
roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
qp->msn_tbl_sz = hwq_attr.aux_depth;
qp->msn = 0;
}
@ -1044,13 +1048,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (rc)
return rc;
rc = bnxt_qplib_alloc_init_swq(sq);
if (rc)
goto fail_sq;
if (psn_sz)
bnxt_qplib_init_psn_ptr(qp, psn_sz);
if (!sq->hwq.is_user) {
rc = bnxt_qplib_alloc_init_swq(sq);
if (rc)
goto fail_sq;
if (psn_sz)
bnxt_qplib_init_psn_ptr(qp, psn_sz);
}
req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@ -1076,9 +1081,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto sq_swq;
rc = bnxt_qplib_alloc_init_swq(rq);
if (rc)
goto fail_rq;
if (!rq->hwq.is_user) {
rc = bnxt_qplib_alloc_init_swq(rq);
if (rc)
goto fail_rq;
}
req.rq_size = cpu_to_le32(rq->max_wqe);
pbl = &rq->hwq.pbl[PBL_LVL_0];
@ -1174,9 +1181,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
spin_unlock_bh(&rcfw->tbl_lock);
return 0;
fail:
@ -1283,7 +1292,8 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
}
}
static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp,
struct cmdq_modify_qp *req)
{
u32 mandatory_flags = 0;
@ -1298,6 +1308,14 @@ static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
}
if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
(qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
mandatory_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
}
if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
@ -1338,7 +1356,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
is_optimized_state_transition(qp))
bnxt_set_mandatory_attributes(qp, &req);
bnxt_set_mandatory_attributes(res, qp, &req);
}
bmask = qp->modify_flags;
req.modify_mask = cpu_to_le32(qp->modify_flags);
@ -1521,6 +1539,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
qp->port_id = le16_to_cpu(sb->port_id);
bail:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
@ -2667,10 +2686,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
bnxt_qplib_add_flush_qp(qp);
} else {
/* Before we complete, do WA 9060 */
if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
cqe_sq_cons)) {
*lib_qp = qp;
goto out;
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
cqe_sq_cons)) {
*lib_qp = qp;
goto out;
}
}
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
cqe->status = CQ_REQ_STATUS_OK;


@ -114,7 +114,6 @@ struct bnxt_qplib_sge {
u32 size;
};
#define BNXT_QPLIB_QP_MAX_SGL 6
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@ -154,7 +153,7 @@ struct bnxt_qplib_swqe {
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
int num_sge;
/* Max inline data is 96 bytes */
u32 inline_len;
@ -299,6 +298,7 @@ struct bnxt_qplib_qp {
u32 dest_qpn;
u8 smac[6];
u16 vlan_id;
u16 port_id;
u8 nw_type;
struct bnxt_qplib_ah ah;


@ -424,7 +424,8 @@ static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
/* Prevent posting if f/w is not in a state to process */
if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
return bnxt_qplib_map_rc(opcode);
return -ENXIO;
if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
return -ETIMEDOUT;
@ -493,7 +494,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __send_message_basic_sanity(rcfw, msg, opcode);
if (rc)
return rc;
return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
rc = __send_message(rcfw, msg, opcode);
if (rc)


@ -584,6 +584,11 @@ static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
}
static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
{
return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
}
static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
{
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;


@ -129,12 +129,18 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_qp_init_rd_atom =
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
/*
* 128 WQEs need to be reserved for the HW (8916). Prevent
* reporting the max number
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
/*
* 128 WQEs need to be reserved for the HW (8916). Prevent
* reporting the max number on legacy devices
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
}
/* Adjust for max_qp_wqes for variable wqe */
if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;


@ -2215,6 +2215,7 @@ struct creq_query_func_resp_sb {
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
#define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;


@ -931,6 +931,7 @@ struct hns_roce_hem_item {
size_t count; /* max ba numbers */
int start; /* start buf offset in this hem */
int end; /* end buf offset in this hem */
bool exist_bt;
};
/* All HEM items are linked in a tree structure */
@ -959,6 +960,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
}
hem->exist_bt = exist_bt;
hem->count = count;
hem->start = start;
hem->end = end;
@ -969,22 +971,22 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_item *hem, bool exist_bt)
struct hns_roce_hem_item *hem)
{
if (exist_bt)
if (hem->exist_bt)
dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
hem->addr, hem->dma_addr);
kfree(hem);
}
static void hem_list_free_all(struct hns_roce_dev *hr_dev,
struct list_head *head, bool exist_bt)
struct list_head *head)
{
struct hns_roce_hem_item *hem, *temp_hem;
list_for_each_entry_safe(hem, temp_hem, head, list) {
list_del(&hem->list);
hem_list_free_item(hr_dev, hem, exist_bt);
hem_list_free_item(hr_dev, hem);
}
}
@ -1084,6 +1086,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
for (i = 0; i < region_cnt; i++) {
r = (struct hns_roce_buf_region *)&regions[i];
/* when r->hopnum = 0, the region should not occupy root_ba. */
if (!r->hopnum)
continue;
if (r->hopnum > 1) {
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step > 0)
@ -1177,7 +1183,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
err_exit:
for (level = 1; level < hopnum; level++)
hem_list_free_all(hr_dev, &temp_list[level], true);
hem_list_free_all(hr_dev, &temp_list[level]);
return ret;
}
@ -1218,16 +1224,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
{
struct hns_roce_hem_item *hem;
/* This is on the has_mtt branch; if r->hopnum
* is 0, there is no root_ba to reuse for the
* region's fake hem, so a dma_alloc request is
* necessary here.
*/
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
r->count, false);
r->count, !r->hopnum);
if (!hem)
return -ENOMEM;
hem_list_assign_bt(hem, cpu_base, phy_base);
/* The root_ba can be reused only when r->hopnum > 0. */
if (r->hopnum)
hem_list_assign_bt(hem, cpu_base, phy_base);
list_add(&hem->list, branch_head);
list_add(&hem->sibling, leaf_head);
return r->count;
/* If r->hopnum == 0, 0 is returned,
* so that the root_bt entry is not occupied.
*/
return r->hopnum ? r->count : 0;
}
static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
@ -1271,7 +1287,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
return -ENOMEM;
total = 0;
for (i = 0; i < region_cnt && total < max_ba_num; i++) {
for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@ -1337,9 +1353,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
region_cnt);
if (ret) {
for (i = 0; i < region_cnt; i++)
hem_list_free_all(hr_dev, &head.branch[i], false);
hem_list_free_all(hr_dev, &head.branch[i]);
hem_list_free_all(hr_dev, &head.root, true);
hem_list_free_all(hr_dev, &head.root);
}
return ret;
@ -1402,10 +1418,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
j != 0);
hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
hem_list_free_all(hr_dev, &hem_list->root_bt, true);
hem_list_free_all(hr_dev, &hem_list->root_bt);
INIT_LIST_HEAD(&hem_list->btm_bt);
hem_list->root_ba = 0;
}


@ -468,7 +468,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
ret = set_ud_opcode(ud_sq_wqe, wr);
if (WARN_ON(ret))
if (WARN_ON_ONCE(ret))
return ret;
ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
@ -572,7 +572,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
if (WARN_ON(ret))
if (WARN_ON_ONCE(ret))
return ret;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
@ -670,6 +670,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
if (unlikely(qp->state == IB_QPS_ERR)) {
flush_cqe(hr_dev, qp);
return;
}
/* All kinds of DirectWQE have the same header field layout */
hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
@ -5619,6 +5623,9 @@ static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
{
struct hns_roce_dip *hr_dip = hr_qp->dip;
if (!hr_dip)
return;
xa_lock(&hr_dev->qp_table.dip_xa);
hr_dip->qp_cnt--;


@ -814,11 +814,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
mapped_cnt < page_cnt; i++) {
r = &mtr->hem_cfg.region[i];
/* if hopnum is 0, no need to map pages in this region */
if (!r->hopnum) {
mapped_cnt += r->count;
continue;
}
if (r->offset + r->count > page_cnt) {
ret = -EINVAL;


@ -2839,7 +2839,7 @@ static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
int err;
*num_plane = 0;
if (!MLX5_CAP_GEN(mdev, ib_virt))
if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
return 0;
err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
@ -3639,7 +3639,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
list) {
if (dev->sys_image_guid == mpi->sys_image_guid &&
(mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
(mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
bound = mlx5_ib_bind_slave_port(dev, mpi);
}
@ -4785,7 +4786,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
mutex_lock(&mlx5_ib_multiport_mutex);
list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
if (dev->sys_image_guid == mpi->sys_image_guid)
if (dev->sys_image_guid == mpi->sys_image_guid &&
mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
bound = mlx5_ib_bind_slave_port(dev, mpi);
if (bound) {


@ -40,6 +40,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe)
{
struct net_device *ndev;
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
rxe->attr.vendor_id = RXE_VENDOR_ID;
@ -71,8 +73,15 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return;
addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
rxe->ndev->dev_addr);
ndev->dev_addr);
dev_put(ndev);
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
@ -109,10 +118,15 @@ static void rxe_init_port_param(struct rxe_port *port)
static void rxe_init_ports(struct rxe_dev *rxe)
{
struct rxe_port *port = &rxe->port;
struct net_device *ndev;
rxe_init_port_param(port);
ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return;
addrconf_addr_eui48((unsigned char *)&port->port_guid,
rxe->ndev->dev_addr);
ndev->dev_addr);
dev_put(ndev);
spin_lock_init(&port->port_lock);
}
@ -167,12 +181,13 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
/* called by ifc layer to create new rxe device.
* The caller should allocate memory for rxe by calling ib_alloc_device.
*/
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
struct net_device *ndev)
{
rxe_init(rxe);
rxe_set_mtu(rxe, mtu);
return rxe_register_device(rxe, ibdev_name);
return rxe_register_device(rxe, ibdev_name, ndev);
}
static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)


@ -139,7 +139,8 @@ enum resp_states {
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
struct net_device *ndev);
void rxe_rcv(struct sk_buff *skb);


@ -31,10 +31,19 @@
static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
struct net_device *ndev;
int ret;
ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return -ENODEV;
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
return dev_mc_add(rxe->ndev, ll_addr);
ret = dev_mc_add(ndev, ll_addr);
dev_put(ndev);
return ret;
}
/**
@ -47,10 +56,19 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
struct net_device *ndev;
int ret;
ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return -ENODEV;
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
return dev_mc_del(rxe->ndev, ll_addr);
ret = dev_mc_del(ndev, ll_addr);
dev_put(ndev);
return ret;
}
/**


@ -524,7 +524,16 @@ out:
*/
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
return rxe->ndev->name;
struct net_device *ndev;
char *ndev_name;
ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return NULL;
ndev_name = ndev->name;
dev_put(ndev);
return ndev_name;
}
int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
@ -536,10 +545,9 @@ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
if (!rxe)
return -ENOMEM;
rxe->ndev = ndev;
ib_mark_name_assigned_by_user(&rxe->ib_dev);
err = rxe_add(rxe, ndev->mtu, ibdev_name);
err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
if (err) {
ib_dealloc_device(&rxe->ib_dev);
return err;
@ -587,10 +595,18 @@ void rxe_port_down(struct rxe_dev *rxe)
void rxe_set_port_state(struct rxe_dev *rxe)
{
if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
struct net_device *ndev;
ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return;
if (netif_running(ndev) && netif_carrier_ok(ndev))
rxe_port_up(rxe);
else
rxe_port_down(rxe);
dev_put(ndev);
}
static int rxe_notify(struct notifier_block *not_blk,


@ -41,6 +41,7 @@ static int rxe_query_port(struct ib_device *ibdev,
u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(ibdev);
struct net_device *ndev;
int err, ret;
if (port_num != 1) {
@ -49,6 +50,12 @@ static int rxe_query_port(struct ib_device *ibdev,
goto err_out;
}
ndev = rxe_ib_device_get_netdev(ibdev);
if (!ndev) {
err = -ENODEV;
goto err_out;
}
memcpy(attr, &rxe->port.attr, sizeof(*attr));
mutex_lock(&rxe->usdev_lock);
@ -57,13 +64,14 @@ static int rxe_query_port(struct ib_device *ibdev,
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else if (dev_get_flags(rxe->ndev) & IFF_UP)
else if (dev_get_flags(ndev) & IFF_UP)
attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
mutex_unlock(&rxe->usdev_lock);
dev_put(ndev);
return ret;
err_out:
@ -1425,9 +1433,16 @@ static const struct attribute_group rxe_attr_group = {
static int rxe_enable_driver(struct ib_device *ib_dev)
{
struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
struct net_device *ndev;
ndev = rxe_ib_device_get_netdev(ib_dev);
if (!ndev)
return -ENODEV;
rxe_set_port_state(rxe);
dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));
dev_put(ndev);
return 0;
}
@ -1495,7 +1510,8 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
struct net_device *ndev)
{
int err;
struct ib_device *dev = &rxe->ib_dev;
@ -1507,13 +1523,13 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
dev->num_comp_vectors = num_possible_cpus();
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
ndev->dev_addr);
dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
ib_set_device_ops(dev, &rxe_dev_ops);
err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
if (err)
return err;


@ -370,6 +370,7 @@ struct rxe_port {
u32 qp_gsi_index;
};
#define RXE_PORT 1
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
@ -377,8 +378,6 @@ struct rxe_dev {
int max_inline_data;
struct mutex usdev_lock;
struct net_device *ndev;
struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
struct rxe_pool ah_pool;
@ -406,6 +405,11 @@ struct rxe_dev {
struct crypto_shash *tfm;
};
static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
{
return ib_device_get_netdev(dev, RXE_PORT);
}
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
atomic64_inc(&rxe->stats_counters[index]);
@ -471,6 +475,7 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
return to_rpd(mw->ibmw.pd);
}
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
struct net_device *ndev);
#endif /* RXE_VERBS_H */


@ -46,6 +46,9 @@
*/
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
/* There is always only a port 1 per siw device */
#define SIW_PORT 1
struct siw_dev_cap {
int max_qp;
int max_qp_wr;
@ -69,16 +72,12 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
struct net_device *netdev;
struct siw_dev_cap attrs;
u32 vendor_part_id;
int numa_node;
char raw_gid[ETH_ALEN];
/* physical port state (only one port per device) */
enum ib_port_state state;
spinlock_t lock;
struct xarray qp_xa;


@ -1759,6 +1759,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct socket *s;
struct siw_cep *cep = NULL;
struct net_device *ndev = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
int addr_family = id->local_addr.ss_family;
int rv = 0;
@ -1779,9 +1780,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
/* For wildcard addr, limit binding to current device only */
if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
ndev = ib_device_get_netdev(id->device, SIW_PORT);
if (ndev) {
s->sk->sk_bound_dev_if = ndev->ifindex;
} else {
rv = -ENODEV;
goto error;
}
}
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in));
} else {
@ -1797,9 +1804,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
}
/* For wildcard addr, limit binding to current device only */
if (ipv6_addr_any(&laddr->sin6_addr))
s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
if (ipv6_addr_any(&laddr->sin6_addr)) {
ndev = ib_device_get_netdev(id->device, SIW_PORT);
if (ndev) {
s->sk->sk_bound_dev_if = ndev->ifindex;
} else {
rv = -ENODEV;
goto error;
}
}
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in6));
}
@ -1860,6 +1873,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
}
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
dev_put(ndev);
siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
@ -1879,6 +1893,7 @@ error:
siw_cep_set_free_and_put(cep);
}
sock_release(s);
dev_put(ndev);
return rv;
}


@ -287,7 +287,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
return NULL;
base_dev = &sdev->base_dev;
sdev->netdev = netdev;
if (netdev->addr_len) {
memcpy(sdev->raw_gid, netdev->dev_addr,
@ -381,12 +380,10 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
switch (event) {
case NETDEV_UP:
sdev->state = IB_PORT_ACTIVE;
siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_DOWN:
sdev->state = IB_PORT_DOWN;
siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
break;
@ -407,12 +404,8 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
break;
/*
* Todo: Below netdev events are currently not handled.
* All other events are not handled
*/
case NETDEV_CHANGEMTU:
case NETDEV_CHANGE:
break;
default:
break;
}
@ -442,12 +435,6 @@ static int siw_newlink(const char *basedev_name, struct net_device *netdev)
sdev = siw_device_create(netdev);
if (sdev) {
dev_dbg(&netdev->dev, "siw: new device\n");
if (netif_running(netdev) && netif_carrier_ok(netdev))
sdev->state = IB_PORT_ACTIVE;
else
sdev->state = IB_PORT_DOWN;
ib_mark_name_assigned_by_user(&sdev->base_dev);
rv = siw_device_register(sdev, basedev_name);
if (rv)

View File

@ -171,21 +171,29 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr)
{
struct siw_device *sdev = to_siw_dev(base_dev);
struct net_device *ndev;
int rv;
memset(attr, 0, sizeof(*attr));
rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
&attr->active_width);
if (rv)
return rv;
ndev = ib_device_get_netdev(base_dev, SIW_PORT);
if (!ndev)
return -ENODEV;
attr->gid_tbl_len = 1;
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
attr->state = sdev->state;
/*
* All zero
*
@ -199,6 +207,7 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
* attr->subnet_timeout = 0;
* attr->init_type_repy = 0;
*/
dev_put(ndev);
return rv;
}
@ -505,21 +514,24 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
struct siw_qp *qp;
struct siw_device *sdev;
struct net_device *ndev;
if (base_qp && qp_attr && qp_init_attr) {
if (base_qp && qp_attr && qp_init_attr)
qp = to_siw_qp(base_qp);
sdev = to_siw_dev(base_qp->device);
} else {
else
return -EINVAL;
}
ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
if (!ndev)
return -ENODEV;
qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
qp_attr->cap.max_send_wr = qp->attrs.sq_size;
qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
qp_attr->max_rd_atomic = qp->attrs.irq_size;
qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
@ -534,6 +546,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
dev_put(ndev);
return 0;
}

View File

@ -349,6 +349,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
struct ib_sge list;
u32 imm;
int err;
@ -401,7 +402,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
if (always_invalidate) {
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &srv_path->mrs[id->msg_id];

View File

@ -1867,20 +1867,20 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
union cqhci_crypto_cap_entry cap;
if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
return qcom_ice_evict_key(msm_host->ice, slot);
/* Only AES-256-XTS has been tested so far. */
cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
return -EINVAL;
if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
cfg->crypto_key,
cfg->data_unit_size, slot);
else
return qcom_ice_evict_key(msm_host->ice, slot);
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
cfg->crypto_key,
cfg->data_unit_size, slot);
}
#else /* CONFIG_MMC_CRYPTO */

View File

@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
* Copyright (C) 2017-2019 Microchip Technology Inc.
* Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@ -983,26 +983,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
u8 value;
u8 data;
u8 data, mult, value;
u32 max_val;
int ret;
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
#define MAX_TIMER_VAL ((1 << 8) - 1)
ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
if (ret < 0)
return ret;
/* The aging timer comprises a 3-bit multiplier and an 8-bit second
* value. Neither of them can be zero. The maximum timer is then
* 7 * 255 = 1785 seconds.
*/
if (!secs)
secs = 1;
data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
/* Return error if too large. */
else if (secs > 7 * MAX_TIMER_VAL)
return -EINVAL;
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
if (ret < 0)
return ret;
value &= ~SW_AGE_CNT_M;
value |= FIELD_PREP(SW_AGE_CNT_M, data);
/* Check whether there is need to update the multiplier. */
mult = FIELD_GET(SW_AGE_CNT_M, value);
max_val = MAX_TIMER_VAL;
if (mult > 0) {
/* Try to reuse the multiplier already in the register; the
* hardware default, for example, uses multiplier 4 with a value
* of 75 seconds to get 300 seconds.
*/
max_val = DIV_ROUND_UP(secs, mult);
if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
max_val = MAX_TIMER_VAL;
}
return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
data = DIV_ROUND_UP(secs, max_val);
if (mult != data) {
value &= ~SW_AGE_CNT_M;
value |= FIELD_PREP(SW_AGE_CNT_M, data);
ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
if (ret < 0)
return ret;
}
value = DIV_ROUND_UP(secs, data);
return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}
void ksz9477_port_queue_split(struct ksz_device *dev, int port)
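
The new logic splits the requested ageing time into a 3-bit multiplier and an 8-bit seconds value, preferring the multiplier already programmed when it still divides the request evenly. The arithmetic can be checked outside the driver; a self-contained sketch with the register I/O omitted (helper names are made up):

#include <stdio.h>

#define MAX_TIMER_VAL ((1 << 8) - 1)	/* 8-bit seconds field */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Returns 0 and fills mult/value, or -1 if secs is out of range. */
static int split_ageing(unsigned int secs, unsigned int cur_mult,
			unsigned int *mult, unsigned int *value)
{
	unsigned int max_val = MAX_TIMER_VAL;

	if (!secs)
		secs = 1;
	else if (secs > 7 * MAX_TIMER_VAL)
		return -1;

	/* Prefer the programmed multiplier when it divides evenly. */
	if (cur_mult > 0) {
		max_val = DIV_ROUND_UP(secs, cur_mult);
		if (max_val > MAX_TIMER_VAL || max_val * cur_mult != secs)
			max_val = MAX_TIMER_VAL;
	}

	*mult = DIV_ROUND_UP(secs, max_val);
	*value = DIV_ROUND_UP(secs, *mult);
	return 0;
}

int main(void)
{
	unsigned int mult, value;

	/* Hardware default: 300 s kept as multiplier 4 x 75 s. */
	if (!split_ageing(300, 4, &mult, &value))
		printf("300s -> mult %u, value %u\n", mult, value);
	/* Out of range: 7 * 255 = 1785 s is the maximum. */
	if (split_ageing(2000, 0, &mult, &value))
		printf("2000s rejected\n");
	return 0;
}

Run against the hardware default, this reproduces the 4 x 75 split the comment above describes, and it rejects anything past 1785 seconds, matching the driver's -EINVAL path.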

View File

@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
* Copyright (C) 2017-2018 Microchip Technology Inc.
* Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@ -165,8 +165,6 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_AGE_CNT_S 3
#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Microchip LAN937X switch driver main logic
* Copyright (C) 2019-2022 Microchip Technology Inc.
* Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@ -461,10 +461,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
u32 value;
u8 data, mult, value8;
bool in_msec = false;
u32 max_val, value;
u32 secs = msecs;
int ret;
#define MAX_TIMER_VAL ((1 << 20) - 1)
/* The aging timer comprises a 3-bit multiplier and a 20-bit second
* value. Neither of them can be zero. The maximum timer is then
* 7 * 1048575 = 7340025 seconds. As this value is too large for
* practical use it can be interpreted as milliseconds, making the
* maximum timer 7340 seconds with finer control. This allows for a
* maximum of 122 minutes compared to 29 minutes in the KSZ9477 switch.
*/
if (msecs % 1000)
in_msec = true;
else
secs /= 1000;
if (!secs)
secs = 1;
/* Return error if too large. */
else if (secs > 7 * MAX_TIMER_VAL)
return -EINVAL;
/* Configure how to interpret the number value. */
ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
if (ret < 0)
return ret;
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
if (ret < 0)
return ret;
/* Check whether there is need to update the multiplier. */
mult = FIELD_GET(SW_AGE_CNT_M, value8);
max_val = MAX_TIMER_VAL;
if (mult > 0) {
/* Try to reuse the multiplier already in the register; the
* hardware default, for example, uses multiplier 4 with a value
* of 75 seconds to get 300 seconds.
*/
max_val = DIV_ROUND_UP(secs, mult);
if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
max_val = MAX_TIMER_VAL;
}
data = DIV_ROUND_UP(secs, max_val);
if (mult != data) {
value8 &= ~SW_AGE_CNT_M;
value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
if (ret < 0)
return ret;
}
secs = DIV_ROUND_UP(secs, data);
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip LAN937X switch register definitions
* Copyright (C) 2019-2021 Microchip Technology Inc.
* Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#ifndef __LAN937X_REG_H
#define __LAN937X_REG_H
@ -56,8 +56,7 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M 0x7
#define SW_AGE_CNT_S 3
#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define REG_SW_LUE_CTRL_1 0x0311
@ -70,6 +69,10 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
#define REG_SW_LUE_CTRL_2 0x0312
#define SW_AGE_CNT_IN_MICROSEC BIT(7)
#define REG_SW_AGE_PERIOD__1 0x0313
#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)

View File

@ -1933,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
netdev_err(dev, "could not enable priv clock\n");
return ret;
}
/* Reset UniMAC */
umac_reset(priv);
@ -2591,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&pdev->dev, "could not enable priv clock\n");
goto err_deregister_netdev;
}
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@ -2605,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
err_deregister_netdev:
unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@ -2774,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
netdev_err(dev, "could not enable priv clock\n");
return ret;
}
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
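
All three hunks add the same missing check: clk_prepare_enable() can fail, so the return value has to be propagated and any resources acquired earlier unwound. A hedged sketch of the probe-style goto unwind, with stubs in place of the clk and netdev APIs:

#include <stdio.h>

static int fail_clk;	/* set to exercise the error path */

static int register_netdev_stub(void)    { puts("netdev registered");   return 0; }
static void unregister_netdev_stub(void) { puts("netdev unregistered"); }
static int clk_prepare_enable_stub(void) { return fail_clk ? -5 : 0; }	/* -EIO */
static void clk_disable_unprepare_stub(void) { }

static int probe(void)
{
	int ret;

	ret = register_netdev_stub();
	if (ret)
		return ret;

	ret = clk_prepare_enable_stub();	/* return value must be checked */
	if (ret) {
		fprintf(stderr, "could not enable priv clock\n");
		goto err_deregister_netdev;	/* unwind in reverse order */
	}

	clk_disable_unprepare_stub();
	return 0;

err_deregister_netdev:
	unregister_netdev_stub();
	return ret;
}

int main(void)
{
	probe();			/* success path */
	fail_clk = 1;
	return probe() ? 0 : 1;		/* error path must unwind cleanly */
}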

View File

@ -1140,6 +1140,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,

View File

@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->rx) {
work_done = gve_rx_poll(block, budget);
/* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of
* TX and RX work done.
*/
if (priv->xdp_prog)
work_done = max_t(int, work_done,
gve_xsk_tx_poll(block, budget));
reschedule |= work_done == budget;
}
@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv)
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
cfg->start_idx = 0;
cfg->num_rings = gve_num_tx_queues(priv);
cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
cfg->tx = priv->tx;
}
@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
/* If XDP prog is not installed, return */
if (!priv->xdp_prog)
/* If XDP prog is not installed or interface is down, return. */
if (!priv->xdp_prog || !netif_running(dev))
return 0;
rx = &priv->rx[qid];
@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
/* If XDP prog is not installed, unmap DMA and return */
if (!priv->xdp_prog)
/* If XDP prog is not installed or interface is down, unmap DMA and
* return.
*/
if (!priv->xdp_prog || !netif_running(dev))
goto done;
tx_qid = gve_xdp_tx_queue_id(priv, qid);
if (!netif_running(dev)) {
priv->rx[qid].xsk_pool = NULL;
xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
priv->tx[tx_qid].xsk_pool = NULL;
goto done;
}
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
tx_qid = gve_xdp_tx_queue_id(priv, qid);
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
@ -1709,24 +1714,20 @@ done:
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
struct napi_struct *napi;
if (!gve_get_napi_enabled(priv))
return -ENETDOWN;
if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
return -EINVAL;
if (flags & XDP_WAKEUP_TX) {
struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
struct napi_struct *napi =
&priv->ntfy_blocks[tx->ntfy_id].napi;
if (!napi_if_scheduled_mark_missed(napi)) {
/* Call local_bh_enable to trigger SoftIRQ processing */
local_bh_disable();
napi_schedule(napi);
local_bh_enable();
}
tx->xdp_xsk_wakeup++;
napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
if (!napi_if_scheduled_mark_missed(napi)) {
/* Call local_bh_enable to trigger SoftIRQ processing */
local_bh_disable();
napi_schedule(napi);
local_bh_enable();
}
return 0;
@ -1837,6 +1838,7 @@ int gve_adjust_queues(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
int num_xdp_queues;
int err;
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@ -1847,6 +1849,10 @@ int gve_adjust_queues(struct gve_priv *priv,
rx_alloc_cfg.qcfg = &new_rx_config;
tx_alloc_cfg.num_rings = new_tx_config.num_queues;
/* Add dedicated XDP TX queues if enabled. */
num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
tx_alloc_cfg.num_rings += num_xdp_queues;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
@ -1899,6 +1905,9 @@ static void gve_turndown(struct gve_priv *priv)
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
/* Make sure that all traffic is finished processing. */
synchronize_net();
}
static void gve_turnup(struct gve_priv *priv)

View File

@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
return;
gve_remove_napi(priv, ntfy_idx);
gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
if (tx->q_num < priv->tx_cfg.num_queues)
gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
else
gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_remove_from_block(priv, idx);
}
@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct gve_tx_ring *tx;
int i, err = 0, qid;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
return -EINVAL;
if (!gve_get_napi_enabled(priv))
return -ENETDOWN;
qid = gve_xdp_tx_queue_id(priv,
smp_processor_id() % priv->num_xdp_queues);
@ -975,33 +981,41 @@ out:
return sent;
}
int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
{
struct gve_rx_ring *rx = rx_block->rx;
struct gve_priv *priv = rx->gve;
struct gve_tx_ring *tx;
int sent = 0;
tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
if (tx->xsk_pool) {
sent = gve_xsk_tx(priv, tx, budget);
u64_stats_update_begin(&tx->statss);
tx->xdp_xsk_sent += sent;
u64_stats_update_end(&tx->statss);
if (xsk_uses_need_wakeup(tx->xsk_pool))
xsk_set_tx_need_wakeup(tx->xsk_pool);
}
return sent;
}
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
u32 nic_done;
bool repoll;
u32 to_do;
/* Find out how much work there is to be done */
nic_done = gve_tx_load_event_counter(priv, tx);
to_do = min_t(u32, (nic_done - tx->done), budget);
gve_clean_xdp_done(priv, tx, to_do);
repoll = nic_done != tx->done;
if (tx->xsk_pool) {
int sent = gve_xsk_tx(priv, tx, budget);
u64_stats_update_begin(&tx->statss);
tx->xdp_xsk_sent += sent;
u64_stats_update_end(&tx->statss);
repoll |= (sent == budget);
if (xsk_uses_need_wakeup(tx->xsk_pool))
xsk_set_tx_need_wakeup(tx->xsk_pool);
}
/* If we still have work we want to repoll */
return repoll;
return nic_done != tx->done;
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
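
The refactor moves AF_XDP transmit work out of gve_xdp_poll() into gve_xsk_tx_poll(), which the RX NAPI handler calls, deciding whether to re-poll from the larger of the RX and TX work done. A small sketch of that shape (stub poll functions, names hypothetical):

#include <stdio.h>

#define max_int(a, b) ((a) > (b) ? (a) : (b))

/* Stubs standing in for gve_rx_poll() / gve_xsk_tx_poll(). */
static int rx_poll(int budget)     { return budget / 2; }	/* some RX work */
static int xsk_tx_poll(int budget) { return budget; }		/* TX still busy */

/* Returns 1 when NAPI should be rescheduled (budget exhausted). */
static int napi_poll(int budget, int xdp_enabled)
{
	int work_done = rx_poll(budget);

	/* Poll XSK TX as part of RX NAPI; re-poll on the max of the two. */
	if (xdp_enabled)
		work_done = max_int(work_done, xsk_tx_poll(budget));

	return work_done == budget;
}

int main(void)
{
	printf("reschedule=%d without XDP\n", napi_poll(64, 0));	/* 0 */
	printf("reschedule=%d with XDP\n", napi_poll(64, 1));		/* 1 */
	return 0;
}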

View File

@ -2704,9 +2704,15 @@ static struct platform_device *port_platdev[3];
static void mv643xx_eth_shared_of_remove(void)
{
struct mv643xx_eth_platform_data *pd;
int n;
for (n = 0; n < 3; n++) {
if (!port_platdev[n])
continue;
pd = dev_get_platdata(&port_platdev[n]->dev);
if (pd)
of_node_put(pd->phy_node);
platform_device_del(port_platdev[n]);
port_platdev[n] = NULL;
}
@ -2769,8 +2775,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
if (!ppdev)
return -ENOMEM;
if (!ppdev) {
ret = -ENOMEM;
goto put_err;
}
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
ppdev->dev.of_node = pnp;
@ -2792,6 +2800,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
port_err:
platform_device_put(ppdev);
put_err:
of_node_put(ppd.phy_node);
return ret;
}
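
Both hunks plug of_node reference leaks: the remove path now drops the phy_node reference held in each port's platform data, and the allocation-failure path releases the node before returning. The underlying rule, one put for every successful get on every exit path, sketched with toy helpers:

#include <stdio.h>
#include <stdlib.h>

struct node { int refcnt; };

static struct node *node_get(struct node *n) { if (n) n->refcnt++; return n; }
static void node_put(struct node *n)         { if (n) n->refcnt--; }

static int add_port(struct node *phy, int fail_alloc)
{
	struct node *held = node_get(phy);	/* reference taken early */
	void *pdev = fail_alloc ? NULL : malloc(16);
	int ret = 0;

	if (!pdev) {
		ret = -12;			/* -ENOMEM */
		goto put_err;			/* must still drop the ref */
	}

	free(pdev);				/* "remove" path: teardown... */
put_err:
	node_put(held);				/* ...then drop the reference */
	return ret;
}

int main(void)
{
	struct node phy = { .refcnt = 1 };

	add_port(&phy, 0);
	add_port(&phy, 1);
	printf("refcnt back to %d (leak if != 1)\n", phy.refcnt);
	return phy.refcnt != 1;
}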

View File

@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */

View File

@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
struct mlx5_macsec_rule_attrs rule_attrs;
union mlx5_macsec_rule *macsec_rule;
if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
return 0;
rule_attrs.macsec_obj_id = sa->macsec_obj_id;
rule_attrs.sci = sa->sci;
rule_attrs.assoc_num = sa->assoc_num;

View File

@ -6542,8 +6542,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev, false);
/* When unloading the driver, the netdev is in registered state
* if it comes from legacy mode. If from switchdev mode, it
* is already unregistered before changing to NIC profile.
*/
if (priv->netdev->reg_state == NETREG_REGISTERED) {
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev, false);
} else {
struct mlx5_core_dev *pos;
int i;
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5_sd_for_each_dev(i, mdev, pos)
mlx5e_destroy_mdev_resources(pos);
else
_mlx5e_suspend(adev, true);
}
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);

View File

@ -1509,6 +1509,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
priv = netdev_priv(netdev);
/* This bit is set when using devlink to change eswitch mode from
* switchdev to legacy. As we need to keep the uplink netdev ifindex,
* we detach the uplink representor profile and attach the NIC profile
* only. The netdev will be unregistered later, when the NIC auxiliary
* driver is unloaded for this case.
* We explicitly block devlink eswitch mode change if any IPSec rules
* are offloaded, but can't block other cases, such as driver unload
* and devlink reload. We have to unregister the netdev before the
* profile change for those cases. This is to avoid a resource leak:
* the offloaded rules don't get a chance to be unoffloaded before the
* cleanup triggered by detaching the uplink representor profile.
*/
if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
unregister_netdev(netdev);
mlx5e_netdev_attach_nic_profile(priv);
}

View File

@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
unsigned long i;
int err;
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
if (!rpriv || !rpriv->netdev)
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
rpriv = rep->rep_data[REP_ETH].priv;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
rhashtable_walk_start(&iter);
while ((flow = rhashtable_walk_next(&iter)) != NULL) {

View File

@ -714,6 +714,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

View File

@ -53,9 +53,6 @@
#include "lag/lag.h"
#include "en/tc/post_meter.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@ -3780,6 +3777,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {

View File

@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
int inlen, err, eqn;
void *cqc, *in;
__be64 *pas;
int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
err = mlx5_comp_eqn_get(mdev, vector, &eqn);
err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;

View File

@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
0);
0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))

View File

@ -64,7 +64,7 @@ static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
u32 i, j;
*(data++) = start;
*(data++) = end - 1;
*(data++) = end;
/* FBNIC_RPC_TCAM_ACT */
for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {

View File

@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
void *cb_priv);
static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
.key_len = offsetof(struct efx_tc_ct_zone, linkage),
.key_len = sizeof_field(struct efx_tc_ct_zone, zone),
.key_offset = 0,
.head_offset = offsetof(struct efx_tc_ct_zone, linkage),
};

View File

@ -405,22 +405,6 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
return -ENODEV;
}
/**
* stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
* @pdev: platform_device structure
* @plat: driver data platform structure
*
* Release resources claimed by stmmac_probe_config_dt().
*/
static void stmmac_remove_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
{
clk_disable_unprepare(plat->stmmac_clk);
clk_disable_unprepare(plat->pclk);
of_node_put(plat->phy_node);
of_node_put(plat->mdio_node);
}
/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
@ -490,8 +474,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
rc = stmmac_mdio_setup(plat, np, &pdev->dev);
if (rc)
return ERR_PTR(rc);
if (rc) {
ret = ERR_PTR(rc);
goto error_put_phy;
}
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@ -581,8 +567,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
stmmac_remove_config_dt(pdev, plat);
return ERR_PTR(-ENOMEM);
ret = ERR_PTR(-ENOMEM);
goto error_put_mdio;
}
plat->dma_cfg = dma_cfg;
@ -610,8 +596,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
rc = stmmac_mtl_setup(pdev, plat);
if (rc) {
stmmac_remove_config_dt(pdev, plat);
return ERR_PTR(rc);
ret = ERR_PTR(rc);
goto error_put_mdio;
}
/* clock setup */
@ -663,6 +649,10 @@ error_hw_init:
clk_disable_unprepare(plat->pclk);
error_pclk_get:
clk_disable_unprepare(plat->stmmac_clk);
error_put_mdio:
of_node_put(plat->mdio_node);
error_put_phy:
of_node_put(plat->phy_node);
return ret;
}
@ -671,16 +661,17 @@ static void devm_stmmac_remove_config_dt(void *data)
{
struct plat_stmmacenet_data *plat = data;
/* Platform data argument is unused */
stmmac_remove_config_dt(NULL, plat);
clk_disable_unprepare(plat->stmmac_clk);
clk_disable_unprepare(plat->pclk);
of_node_put(plat->mdio_node);
of_node_put(plat->phy_node);
}
/**
* devm_stmmac_probe_config_dt
* @pdev: platform_device structure
* @mac: MAC address to use
* Description: Devres variant of stmmac_probe_config_dt(). Does not require
* the user to call stmmac_remove_config_dt() at driver detach.
* Description: Devres variant of stmmac_probe_config_dt().
*/
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
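
With stmmac_remove_config_dt() removed, all cleanup lives in the devres action devm_stmmac_remove_config_dt(), which the core runs automatically at detach. A plain-C sketch of the idea, registering a cleanup callback at acquire time so callers cannot forget it (the tiny action table stands in for devres):

#include <stdio.h>

typedef void (*action_fn)(void *);

/* A toy devres: fixed-size stack of (callback, data) pairs. */
static struct { action_fn fn; void *data; } actions[8];
static int n_actions;

static int devm_add_action(action_fn fn, void *data)
{
	if (n_actions == 8)
		return -12;	/* -ENOMEM */
	actions[n_actions].fn = fn;
	actions[n_actions].data = data;
	n_actions++;
	return 0;
}

static void device_detach(void)
{
	while (n_actions--)	/* run in reverse registration order */
		actions[n_actions].fn(actions[n_actions].data);
	n_actions = 0;
}

static void remove_config(void *data)
{
	printf("releasing %s\n", (const char *)data);
}

int main(void)
{
	/* probe_config(): acquire, then register the matching cleanup. */
	devm_add_action(remove_config, "stmmac_clk");
	devm_add_action(remove_config, "phy_node");

	device_detach();	/* cleanup happens without an explicit call */
	return 0;
}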

View File

@ -3551,7 +3551,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
common->pf_p0_rx_ptype_rrobin = false;
common->pf_p0_rx_ptype_rrobin = true;
common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,

View File

@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
IEP_CMP_CFG_CMP_EN(cmp), 0);
}
/* enable reset counter on CMP0 event */
@ -780,6 +783,11 @@ int icss_iep_exit(struct icss_iep *iep)
}
icss_iep_disable(iep);
if (iep->pps_enabled)
icss_iep_pps_enable(iep, false);
else if (iep->perout_enabled)
icss_iep_perout_enable(iep, NULL, false);
return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);

View File

@ -855,31 +855,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);
void prueth_emac_stop(struct prueth_emac *emac)
{
struct prueth *prueth = emac->prueth;
int slice;
switch (emac->port_id) {
case PRUETH_PORT_MII0:
slice = ICSS_SLICE0;
break;
case PRUETH_PORT_MII1:
slice = ICSS_SLICE1;
break;
default:
netdev_err(emac->ndev, "invalid port\n");
return;
}
emac->fw_running = 0;
if (!emac->is_sr1)
rproc_shutdown(prueth->txpru[slice]);
rproc_shutdown(prueth->rtu[slice]);
rproc_shutdown(prueth->pru[slice]);
}
EXPORT_SYMBOL_GPL(prueth_emac_stop);
void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
int i;

View File

@ -397,7 +397,7 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
return 0;
}
static void icssg_init_emac_mode(struct prueth *prueth)
void icssg_init_emac_mode(struct prueth *prueth)
{
/* When the device is configured as a bridge and it is being brought
* back to the emac mode, the host mac address has to be set as 0.
@ -406,9 +406,6 @@ static void icssg_init_emac_mode(struct prueth *prueth)
int i;
u8 mac[ETH_ALEN] = { 0 };
if (prueth->emacs_initialized)
return;
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
@ -423,15 +420,13 @@ static void icssg_init_emac_mode(struct prueth *prueth)
/* Clear host MAC address */
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
static void icssg_init_fw_offload_mode(struct prueth *prueth)
void icssg_init_fw_offload_mode(struct prueth *prueth)
{
u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
int i;
if (prueth->emacs_initialized)
return;
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
@ -448,6 +443,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth)
icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
}
EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
@ -455,11 +451,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
icssg_init_fw_offload_mode(prueth);
else
icssg_init_emac_mode(prueth);
memset_io(config, 0, TAS_GATE_MASK_LIST0);
icssg_miig_queues_init(prueth, slice);
@ -786,3 +777,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_set_pvid);
int emac_fdb_flow_id_updated(struct prueth_emac *emac)
{
struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
int slice = prueth_emac_slice(emac);
struct mgmt_cmd fdb_cmd = { 0 };
int ret;
fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
fdb_cmd.param = 0;
fdb_cmd.param |= (slice << 4);
fdb_cmd.cmd_args[0] = 0;
ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
if (ret)
return ret;
WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);

View File

@ -55,6 +55,7 @@ struct icssg_rxq_ctx {
#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
#define ICSSG_FW_MGMT_CMD_TYPE 0x04
#define ICSSG_FW_MGMT_PKT 0x80000000
#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05
struct icssg_r30_cmd {
u32 cmd[4];

View File

@ -164,11 +164,26 @@ static struct icssg_firmwares icssg_emac_firmwares[] = {
}
};
static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
static int prueth_start(struct rproc *rproc, const char *fw_name)
{
int ret;
ret = rproc_set_firmware(rproc, fw_name);
if (ret)
return ret;
return rproc_boot(rproc);
}
static void prueth_shutdown(struct rproc *rproc)
{
rproc_shutdown(rproc);
}
static int prueth_emac_start(struct prueth *prueth)
{
struct icssg_firmwares *firmwares;
struct device *dev = prueth->dev;
int slice, ret;
int ret, slice;
if (prueth->is_switch_mode)
firmwares = icssg_switch_firmwares;
@ -177,49 +192,126 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
else
firmwares = icssg_emac_firmwares;
slice = prueth_emac_slice(emac);
if (slice < 0) {
netdev_err(emac->ndev, "invalid port\n");
return -EINVAL;
for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
if (ret) {
dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
goto unwind_slices;
}
ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
if (ret) {
dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
rproc_shutdown(prueth->pru[slice]);
goto unwind_slices;
}
ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
if (ret) {
dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
rproc_shutdown(prueth->rtu[slice]);
rproc_shutdown(prueth->pru[slice]);
goto unwind_slices;
}
}
ret = icssg_config(prueth, emac, slice);
if (ret)
return ret;
ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
ret = rproc_boot(prueth->pru[slice]);
if (ret) {
dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
return -EINVAL;
}
ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
ret = rproc_boot(prueth->rtu[slice]);
if (ret) {
dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
goto halt_pru;
}
ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
ret = rproc_boot(prueth->txpru[slice]);
if (ret) {
dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
goto halt_rtu;
}
emac->fw_running = 1;
return 0;
halt_rtu:
rproc_shutdown(prueth->rtu[slice]);
halt_pru:
rproc_shutdown(prueth->pru[slice]);
unwind_slices:
while (--slice >= 0) {
prueth_shutdown(prueth->txpru[slice]);
prueth_shutdown(prueth->rtu[slice]);
prueth_shutdown(prueth->pru[slice]);
}
return ret;
}
static void prueth_emac_stop(struct prueth *prueth)
{
int slice;
for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
prueth_shutdown(prueth->txpru[slice]);
prueth_shutdown(prueth->rtu[slice]);
prueth_shutdown(prueth->pru[slice]);
}
}
static int prueth_emac_common_start(struct prueth *prueth)
{
struct prueth_emac *emac;
int ret = 0;
int slice;
if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
return -EINVAL;
/* clear SMEM and MSMC settings for all slices */
memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
icssg_init_fw_offload_mode(prueth);
else
icssg_init_emac_mode(prueth);
for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
emac = prueth->emac[slice];
if (!emac)
continue;
ret = icssg_config(prueth, emac, slice);
if (ret)
goto disable_class;
}
ret = prueth_emac_start(prueth);
if (ret)
goto disable_class;
emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
prueth->emac[ICSS_SLICE1];
ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
emac, IEP_DEFAULT_CYCLE_TIME_NS);
if (ret) {
dev_err(prueth->dev, "Failed to initialize IEP module\n");
goto stop_pruss;
}
return 0;
stop_pruss:
prueth_emac_stop(prueth);
disable_class:
icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
return ret;
}
static int prueth_emac_common_stop(struct prueth *prueth)
{
struct prueth_emac *emac;
if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
return -EINVAL;
icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
prueth_emac_stop(prueth);
emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
prueth->emac[ICSS_SLICE1];
icss_iep_exit(emac->iep);
return 0;
}
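
prueth_emac_start() now boots PRU, RTU and TX_PRU for both slices and, on any failure, shuts down every slice already started, in reverse order. The pattern generalizes to any array of resources; a self-contained sketch with stubbed boot/shutdown in place of rproc_boot()/rproc_shutdown():

#include <stdio.h>

#define NUM_SLICES 2

static int fail_at = -1;	/* index that should fail, -1 for none */

static int boot(int slice)
{
	if (slice == fail_at)
		return -5;	/* -EIO */
	printf("booted slice %d\n", slice);
	return 0;
}

static void shutdown_slice(int slice)
{
	printf("shut down slice %d\n", slice);
}

static int start_all(void)
{
	int slice, ret;

	for (slice = 0; slice < NUM_SLICES; slice++) {
		ret = boot(slice);
		if (ret)
			goto unwind_slices;
	}
	return 0;

unwind_slices:
	while (--slice >= 0)	/* release only what was acquired */
		shutdown_slice(slice);
	return ret;
}

int main(void)
{
	start_all();		/* full success */
	fail_at = 1;
	return start_all() ? 0 : 1;
}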
/* Called back by the PHY layer if there is a change in link state of the hw port */
static void emac_adjust_link(struct net_device *ndev)
{
@ -374,9 +466,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
u32 cycletime;
int timeout;
if (!emac->fw_running)
return;
sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
@ -543,23 +632,17 @@ static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
int ret, i, num_data_chn = emac->tx_ch_num;
struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
struct device *dev = prueth->dev;
int max_rx_flows;
int rx_flow;
/* clear SMEM and MSMC settings for all slices */
if (!prueth->emacs_initialized) {
memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
}
/* set h/w MAC as user might have re-configured */
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
icssg_class_default(prueth->miig_rt, slice, 0, false);
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
/* Notify the stack of the actual queue counts. */
@ -597,18 +680,23 @@ static int emac_ndo_open(struct net_device *ndev)
goto cleanup_napi;
}
/* reset and start PRU firmware */
ret = prueth_emac_start(prueth, emac);
if (ret)
goto free_rx_irq;
if (!prueth->emacs_initialized) {
ret = prueth_emac_common_start(prueth);
if (ret)
goto free_rx_irq;
}
flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
ret = emac_fdb_flow_id_updated(emac);
if (ret) {
netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
goto stop;
}
icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
if (!prueth->emacs_initialized) {
ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
emac, IEP_DEFAULT_CYCLE_TIME_NS);
}
ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
IRQF_ONESHOT, dev_name(dev), emac);
if (ret)
@ -653,7 +741,8 @@ reset_rx_chn:
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
prueth_emac_stop(emac);
if (!prueth->emacs_initialized)
prueth_emac_common_stop(prueth);
free_rx_irq:
free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
@ -689,8 +778,6 @@ static int emac_ndo_stop(struct net_device *ndev)
if (ndev->phydev)
phy_stop(ndev->phydev);
icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
if (emac->prueth->is_hsr_offload_mode)
__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
else
@ -728,11 +815,9 @@ static int emac_ndo_stop(struct net_device *ndev)
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
if (prueth->emacs_initialized == 1)
icss_iep_exit(emac->iep);
/* stop PRUs */
prueth_emac_stop(emac);
if (prueth->emacs_initialized == 1)
prueth_emac_common_stop(prueth);
free_irq(emac->tx_ts_irq, emac);
@ -1053,10 +1138,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
}
}
static void prueth_emac_restart(struct prueth *prueth)
static int prueth_emac_restart(struct prueth *prueth)
{
struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
int ret;
/* Detach the net_device for both PRUeth ports */
if (netif_running(emac0->ndev))
@ -1065,36 +1151,46 @@ static void prueth_emac_restart(struct prueth *prueth)
netif_device_detach(emac1->ndev);
/* Disable both PRUeth ports */
icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
if (ret)
return ret;
/* Stop both pru cores for both PRUeth ports*/
prueth_emac_stop(emac0);
prueth->emacs_initialized--;
prueth_emac_stop(emac1);
prueth->emacs_initialized--;
ret = prueth_emac_common_stop(prueth);
if (ret) {
dev_err(prueth->dev, "Failed to stop the firmwares");
return ret;
}
/* Start both pru cores for both PRUeth ports */
prueth_emac_start(prueth, emac0);
prueth->emacs_initialized++;
prueth_emac_start(prueth, emac1);
prueth->emacs_initialized++;
ret = prueth_emac_common_start(prueth);
if (ret) {
dev_err(prueth->dev, "Failed to start the firmwares");
return ret;
}
/* Enable forwarding for both PRUeth ports */
icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
/* Attach net_device for both PRUeth ports */
netif_device_attach(emac0->ndev);
netif_device_attach(emac1->ndev);
return ret;
}
static void icssg_change_mode(struct prueth *prueth)
{
struct prueth_emac *emac;
int mac;
int mac, ret;
prueth_emac_restart(prueth);
ret = prueth_emac_restart(prueth);
if (ret) {
dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
return;
}
for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
emac = prueth->emac[mac];
@ -1173,13 +1269,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
int ret;
prueth->br_members &= ~BIT(emac->port_id);
if (prueth->is_switch_mode) {
prueth->is_switch_mode = false;
emac->port_vlan = 0;
prueth_emac_restart(prueth);
ret = prueth_emac_restart(prueth);
if (ret) {
dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
return;
}
}
prueth_offload_fwd_mark_update(prueth);
@ -1228,6 +1329,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
struct prueth *prueth = emac->prueth;
struct prueth_emac *emac0;
struct prueth_emac *emac1;
int ret;
emac0 = prueth->emac[PRUETH_MAC0];
emac1 = prueth->emac[PRUETH_MAC1];
@ -1238,7 +1340,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
emac0->port_vlan = 0;
emac1->port_vlan = 0;
prueth->hsr_dev = NULL;
prueth_emac_restart(prueth);
ret = prueth_emac_restart(prueth);
if (ret) {
dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
return;
}
netdev_dbg(ndev, "Disabling HSR Offload mode\n");
}
}
@ -1413,13 +1519,10 @@ static int prueth_probe(struct platform_device *pdev)
prueth->pa_stats = NULL;
}
if (eth0_node) {
if (eth0_node || eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
goto put_cores;
}
if (eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
if (ret)
goto put_cores;
@ -1618,14 +1721,12 @@ put_pruss:
pruss_put(prueth->pruss);
put_cores:
if (eth1_node) {
prueth_put_cores(prueth, ICSS_SLICE1);
of_node_put(eth1_node);
}
if (eth0_node) {
if (eth0_node || eth1_node) {
prueth_put_cores(prueth, ICSS_SLICE0);
of_node_put(eth0_node);
prueth_put_cores(prueth, ICSS_SLICE1);
of_node_put(eth1_node);
}
return ret;

View File

@ -140,7 +140,6 @@ struct prueth_rx_chn {
/* data for each emac port */
struct prueth_emac {
bool is_sr1;
bool fw_running;
struct prueth *prueth;
struct net_device *ndev;
u8 mac_addr[6];
@ -361,6 +360,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
void icssg_config_half_duplex(struct prueth_emac *emac);
void icssg_init_emac_mode(struct prueth *prueth);
void icssg_init_fw_offload_mode(struct prueth *prueth);
/* Buffer queue helpers */
int icssg_queue_pop(struct prueth *prueth, u8 queue);
@ -377,6 +378,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
u8 untag_mask, bool add);
u16 icssg_get_pvid(struct prueth_emac *emac);
void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
int emac_fdb_flow_id_updated(struct prueth_emac *emac);
#define prueth_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct prueth_tx_chn, napi_tx)
@ -407,7 +409,6 @@ void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
irqreturn_t prueth_rx_irq(int irq, void *dev_id);
void prueth_emac_stop(struct prueth_emac *emac);
void prueth_cleanup_tx_ts(struct prueth_emac *emac);
int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
int prueth_prepare_rx_chan(struct prueth_emac *emac,

View File

@ -440,7 +440,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
goto halt_pru;
}
emac->fw_running = 1;
return 0;
halt_pru:
@ -449,6 +448,29 @@ halt_pru:
return ret;
}
static void prueth_emac_stop(struct prueth_emac *emac)
{
struct prueth *prueth = emac->prueth;
int slice;
switch (emac->port_id) {
case PRUETH_PORT_MII0:
slice = ICSS_SLICE0;
break;
case PRUETH_PORT_MII1:
slice = ICSS_SLICE1;
break;
default:
netdev_err(emac->ndev, "invalid port\n");
return;
}
if (!emac->is_sr1)
rproc_shutdown(prueth->txpru[slice]);
rproc_shutdown(prueth->rtu[slice]);
rproc_shutdown(prueth->pru[slice]);
}
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device

View File

@ -432,10 +432,12 @@ struct kszphy_ptp_priv {
struct kszphy_priv {
struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
struct clk *clk;
int led_mode;
u16 vct_ctrl1000;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
bool clk_enable;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
@ -2050,6 +2052,46 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
static void kszphy_enable_clk(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
if (!priv->clk_enable && priv->clk) {
clk_prepare_enable(priv->clk);
priv->clk_enable = true;
}
}
static void kszphy_disable_clk(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
if (priv->clk_enable && priv->clk) {
clk_disable_unprepare(priv->clk);
priv->clk_enable = false;
}
}
static int kszphy_generic_resume(struct phy_device *phydev)
{
kszphy_enable_clk(phydev);
return genphy_resume(phydev);
}
static int kszphy_generic_suspend(struct phy_device *phydev)
{
int ret;
ret = genphy_suspend(phydev);
if (ret)
return ret;
kszphy_disable_clk(phydev);
return 0;
}
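
kszphy_enable_clk() and kszphy_disable_clk() guard the reference clock with a clk_enable flag, so mismatched suspend/resume sequences can neither double-enable nor double-disable it, and a missing (NULL) clock is tolerated. The guarded toggle, sketched with a plain counter instead of the clk API:

#include <stdio.h>

struct phy_priv {
	int *clk;	/* NULL when no clock was given */
	int clk_enable;
};

static void phy_enable_clk(struct phy_priv *p)
{
	if (!p->clk_enable && p->clk) {	/* idempotent: at most one enable */
		(*p->clk)++;
		p->clk_enable = 1;
	}
}

static void phy_disable_clk(struct phy_priv *p)
{
	if (p->clk_enable && p->clk) {	/* never disable what isn't enabled */
		(*p->clk)--;
		p->clk_enable = 0;
	}
}

int main(void)
{
	int clk = 0;
	struct phy_priv p = { .clk = &clk };

	phy_enable_clk(&p);
	phy_enable_clk(&p);	/* second call is a no-op */
	phy_disable_clk(&p);
	phy_disable_clk(&p);	/* balanced: no underflow */
	printf("clk count %d (must be 0)\n", clk);
	return clk != 0;
}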
static int kszphy_suspend(struct phy_device *phydev)
{
/* Disable PHY Interrupts */
@ -2059,7 +2101,7 @@ static int kszphy_suspend(struct phy_device *phydev)
phydev->drv->config_intr(phydev);
}
return genphy_suspend(phydev);
return kszphy_generic_suspend(phydev);
}
static void kszphy_parse_led_mode(struct phy_device *phydev)
@ -2090,7 +2132,9 @@ static int kszphy_resume(struct phy_device *phydev)
{
int ret;
genphy_resume(phydev);
ret = kszphy_generic_resume(phydev);
if (ret)
return ret;
/* After switching from power-down to normal mode, an internal global
* reset is automatically generated. Wait a minimum of 1 ms before
@ -2112,6 +2156,24 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
/* Because of errata DS80000700A (receiver error following software
* power down), the suspend and resume callbacks only disable and
* enable the external RMII reference clock.
*/
static int ksz8041_resume(struct phy_device *phydev)
{
kszphy_enable_clk(phydev);
return 0;
}
static int ksz8041_suspend(struct phy_device *phydev)
{
kszphy_disable_clk(phydev);
return 0;
}
static int ksz9477_resume(struct phy_device *phydev)
{
int ret;
@ -2159,7 +2221,10 @@ static int ksz8061_resume(struct phy_device *phydev)
if (!(ret & BMCR_PDOWN))
return 0;
genphy_resume(phydev);
ret = kszphy_generic_resume(phydev);
if (ret)
return ret;
usleep_range(1000, 2000);
/* Re-program the value after chip is reset. */
@ -2177,6 +2242,11 @@ static int ksz8061_resume(struct phy_device *phydev)
return 0;
}
static int ksz8061_suspend(struct phy_device *phydev)
{
return kszphy_suspend(phydev);
}
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@ -2217,10 +2287,14 @@ static int kszphy_probe(struct phy_device *phydev)
} else if (!clk) {
/* unnamed clock from the generic ethernet-phy binding */
clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
}
if (IS_ERR(clk))
return PTR_ERR(clk);
clk_disable_unprepare(clk);
priv->clk = clk;
if (ksz8041_fiber_mode(phydev))
phydev->port = PORT_FIBRE;
@ -5290,6 +5364,21 @@ static int lan8841_probe(struct phy_device *phydev)
return 0;
}
static int lan8804_resume(struct phy_device *phydev)
{
return kszphy_resume(phydev);
}
static int lan8804_suspend(struct phy_device *phydev)
{
return kszphy_generic_suspend(phydev);
}
static int lan8841_resume(struct phy_device *phydev)
{
return kszphy_generic_resume(phydev);
}
static int lan8841_suspend(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
@ -5298,7 +5387,7 @@ static int lan8841_suspend(struct phy_device *phydev)
if (ptp_priv->ptp_clock)
ptp_cancel_worker_sync(ptp_priv->ptp_clock);
return genphy_suspend(phydev);
return kszphy_generic_suspend(phydev);
}
static struct phy_driver ksphy_driver[] = {
@ -5358,9 +5447,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
/* No suspend/resume callbacks because of errata DS80000700A,
* receiver error following software power down.
*/
.suspend = ksz8041_suspend,
.resume = ksz8041_resume,
}, {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
@ -5436,7 +5524,7 @@ static struct phy_driver ksphy_driver[] = {
.soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = kszphy_suspend,
.suspend = ksz8061_suspend,
.resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
@ -5507,8 +5595,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = kszphy_resume,
.suspend = lan8804_suspend,
.resume = lan8804_resume,
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
@ -5526,7 +5614,7 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = lan8841_suspend,
.resume = genphy_resume,
.resume = lan8841_resume,
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {

View File

@ -64,15 +64,11 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
if (ret < 0)
return ret;
chan = priv->port[id].chan[0];
if (chan < 4)
val = (u16)(ret | BIT(chan));
val = BIT(chan);
else
val = (u16)(ret | BIT(chan + 4));
val = BIT(chan + 4);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
@ -100,15 +96,11 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
if (ret < 0)
return ret;
chan = priv->port[id].chan[0];
if (chan < 4)
val = (u16)(ret | BIT(chan + 4));
val = BIT(chan + 4);
else
val = (u16)(ret | BIT(chan + 8));
val = BIT(chan + 8);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
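
The fix stops OR-ing the read-back power status into the value written, assuming, as the change implies, that the power on/off bits act as one-shot controls: every 1 bit written takes effect on its channel, so a read-modify-write would also act on unrelated channels. A deliberately simplified model of why that matters (the toggle semantics here are illustrative, not the real register behaviour):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Model of a write-1-to-act register: every written 1 bit acts on its
 * channel; reads return current channel state, not the last value. */
static unsigned int chan_state;

static void reg_write(unsigned int val)
{
	chan_state ^= val;	/* each 1 bit toggles its channel */
}

int main(void)
{
	unsigned int status;

	reg_write(BIT(0) | BIT(2));	/* channels 0 and 2 on */

	/* Buggy read-modify-write: also acts on channels 0 and 2. */
	status = chan_state;
	reg_write(status | BIT(1));
	printf("rmw:   state 0x%x (0 and 2 wrongly toggled)\n", chan_state);

	chan_state = BIT(0) | BIT(2);	/* reset the model */

	/* Fixed: write only the bit for the channel being changed. */
	reg_write(BIT(1));
	printf("fixed: state 0x%x\n", chan_state);
	return 0;
}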

View File

@ -161,6 +161,7 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";

View File

@ -545,6 +545,7 @@ extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
extern const char iwl_fm_name[];
extern const char iwl_wh_name[];
extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];

View File

@ -2954,6 +2954,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
int idx)
{
int i;
int n_channels = 0;
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
@ -2962,7 +2963,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
match->channels[match->n_channels++] =
match->channels[n_channels++] =
mvm->nd_channels[i]->center_freq;
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
@ -2970,9 +2971,11 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
match->channels[match->n_channels++] =
match->channels[n_channels++] =
mvm->nd_channels[i]->center_freq;
}
/* We may have ended up with fewer channels than we allocated. */
match->n_channels = n_channels;
}
/**
@ -3053,6 +3056,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
GFP_KERNEL);
if (!net_detect || !n_matches)
goto out_report_nd;
net_detect->n_matches = n_matches;
n_matches = 0;
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
struct cfg80211_wowlan_nd_match *match;
@ -3066,8 +3071,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
GFP_KERNEL);
if (!match)
goto out_report_nd;
match->n_channels = n_channels;
net_detect->matches[net_detect->n_matches++] = match;
net_detect->matches[n_matches++] = match;
/* We inverted the order of the SSIDs in the scan
* request, so invert the index here.
@ -3082,6 +3088,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
}
/* We may have fewer matches than we allocated. */
net_detect->n_matches = n_matches;
out_report_nd:
wakeup.net_detect = net_detect;
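
Both wowlan fixes apply one rule: when some allocated slots may remain unused, count into a local variable and publish the final count once, instead of incrementing the published n_channels/n_matches while filling. A sketch:

#include <stdio.h>

#define ALLOCATED 8

struct match {
	int n_channels;
	int channels[ALLOCATED];
};

/* Fill only the entries whose bit is set; commit the count at the end. */
static void set_freqs(struct match *m, unsigned int bitmap)
{
	int n_channels = 0;
	int i;

	for (i = 0; i < ALLOCATED; i++)
		if (bitmap & (1u << i))
			m->channels[n_channels++] = 2412 + 5 * i;

	/* We may have ended up with fewer channels than we allocated. */
	m->n_channels = n_channels;
}

int main(void)
{
	struct match m = { 0 };

	set_freqs(&m, 0x15);	/* bits 0, 2, 4 */
	printf("%d channels used of %d allocated\n", m.n_channels, ALLOCATED);
	return 0;
}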

View File

@ -1106,18 +1106,53 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* Bz */
/* FIXME: need to change the naming according to the actual CRF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_wh_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
iwl_cfg_bz, iwl_wh_name),
/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,

View File

@ -442,8 +442,8 @@ static void cw1200_spi_disconnect(struct spi_device *func)
cw1200_core_release(self->core);
self->core = NULL;
}
cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)

View File

@ -104,7 +104,7 @@ struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
break;
msleep(20);
} while (retries-- > 0);
} while (--retries > 0);
if (!retries) {
dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
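
The one-character change from retries-- to --retries matters because of the check that follows the loop: with post-decrement, exhausting every attempt leaves retries at -1, so if (!retries) never fires and the failure is silently ignored. A compilable demonstration:

#include <stdio.h>

static int attempts_left_post(int retries)
{
	do {
		/* work that keeps failing ... */
	} while (retries-- > 0);
	return retries;	/* -1 after exhaustion: !retries is false */
}

static int attempts_left_pre(int retries)
{
	do {
		/* work that keeps failing ... */
	} while (--retries > 0);
	return retries;	/* 0 after exhaustion: !retries reports the error */
}

int main(void)
{
	printf("post-decrement leaves retries = %d\n", attempts_left_post(40));
	printf("pre-decrement  leaves retries = %d\n", attempts_left_pre(40));
	return 0;
}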

View File

@ -104,14 +104,21 @@ void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
fsm_state_notify(ctl->md, state);
}
static void fsm_release_command(struct kref *ref)
{
struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);
kfree(cmd);
}
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
*cmd->ret = result;
complete_all(cmd->done);
cmd->result = result;
complete_all(&cmd->done);
}
kfree(cmd);
kref_put(&cmd->refcnt, fsm_release_command);
}
static void fsm_del_kf_event(struct t7xx_fsm_event *event)
@ -475,7 +482,6 @@ static int fsm_main_thread(void *data)
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
DECLARE_COMPLETION_ONSTACK(done);
struct t7xx_fsm_command *cmd;
unsigned long flags;
int ret;
@ -487,11 +493,13 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
INIT_LIST_HEAD(&cmd->entry);
cmd->cmd_id = cmd_id;
cmd->flag = flag;
kref_init(&cmd->refcnt);
if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
cmd->done = &done;
cmd->ret = &ret;
init_completion(&cmd->done);
kref_get(&cmd->refcnt);
}
kref_get(&cmd->refcnt);
spin_lock_irqsave(&ctl->command_lock, flags);
list_add_tail(&cmd->entry, &ctl->command_queue);
spin_unlock_irqrestore(&ctl->command_lock, flags);
@ -501,11 +509,11 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
unsigned long wait_ret;
wait_ret = wait_for_completion_timeout(&done,
wait_ret = wait_for_completion_timeout(&cmd->done,
msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
if (!wait_ret)
return -ETIMEDOUT;
ret = wait_ret ? cmd->result : -ETIMEDOUT;
kref_put(&cmd->refcnt, fsm_release_command);
return ret;
}
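Taken together, the t7xx hunks replace "the finisher frees the command and writes through pointers supplied by the waiter" with reference counting: the result and completion now live inside the command, both the queue consumer and a synchronous waiter hold a reference, and fsm_release_command() runs only on the final kref_put(). The lifetime rule in a compact sketch, using the same kernel primitives (error handling trimmed):

#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_cmd {
	struct kref refcnt;
	struct completion done;
	int result;
};

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_cmd, refcnt));
}

/* worker: publish the result, wake any waiter, drop the queue's reference */
static void demo_finish(struct demo_cmd *cmd, int result)
{
	cmd->result = result;
	complete_all(&cmd->done);
	kref_put(&cmd->refcnt, demo_release);
}

/* waiter: its own reference keeps cmd alive even if the wait times out */
static int demo_wait(struct demo_cmd *cmd, unsigned long timeout)
{
	int ret = wait_for_completion_timeout(&cmd->done, timeout) ?
		  cmd->result : -ETIMEDOUT;

	kref_put(&cmd->refcnt, demo_release);
	return ret;
}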

@ -110,8 +110,9 @@ struct t7xx_fsm_command {
struct list_head entry;
enum t7xx_fsm_cmd_state cmd_id;
unsigned int flag;
struct completion *done;
int *ret;
struct completion done;
int result;
struct kref refcnt;
};
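Embedding done and result in struct t7xx_fsm_command is what makes the scheme above safe; the old struct completion *done and int *ret members pointed into the submitter's stack frame, so a waiter that timed out and returned left the worker with dangling pointers. The old hazard in miniature (deliberately buggy, for illustration only):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Old layout, simplified: the worker wrote through pointers into this
 * stack frame (*cmd->ret, then complete_all(cmd->done)). */
static int old_submit_and_wait(void)
{
	DECLARE_COMPLETION_ONSTACK(done);	/* dies when we return */
	int ret = 0;				/* worker wrote *cmd->ret */

	/* ... queue a command carrying &done and &ret ... */

	if (!wait_for_completion_timeout(&done, HZ))
		return -ETIMEDOUT;	/* BUG: a late fsm_finish_command()
					 * still dereferences &done and &ret
					 * after this frame is gone */
	return ret;
}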
struct t7xx_fsm_notifier {

@ -173,6 +173,11 @@ enum nvme_quirks {
* MSI (but not MSI-X) interrupts are broken and never fire.
*/
NVME_QUIRK_BROKEN_MSI = (1 << 21),
/*
* Align dma pool segment size to 512 bytes
*/
NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22),
};
/*

@ -2834,15 +2834,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
size_t small_align = 256;
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
NVME_CTRL_PAGE_SIZE,
NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
small_align = 512;
/* Optimisation for I/Os between 4k and 128k */
dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
256, 256, 0);
256, small_align, 0);
if (!dev->prp_small_pool) {
dma_pool_destroy(dev->prp_page_pool);
return -ENOMEM;
@ -3607,7 +3612,7 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
.driver_data = NVME_QUIRK_QDEPTH_ONE },
.driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
{ PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_BOGUS_NID, },
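The new quirk raises the small PRP-list pool's alignment from 256 to 512 bytes, and the O2 Micro controller in the 64 GB Steam Deck (whose table entry previously carried NVME_QUIRK_QDEPTH_ONE) is its first user; the implication is that this device mishandles PRP list segments with only 256-byte alignment. Condensed from nvme_setup_prp_pools() above, the quirk-sensitive part is just:

/* Condensed sketch: the block size stays 256 bytes, only the segment
 * alignment changes for quirked devices. */
static int setup_small_prp_pool(struct nvme_dev *dev)
{
	size_t small_align = 256;

	if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
		small_align = 512;

	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
					      256, small_align, 0);
	return dev->prp_small_pool ? 0 : -ENOMEM;
}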

@ -2024,14 +2024,6 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
return __nvme_tcp_alloc_io_queues(ctrl);
}
static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_io_queues(ctrl);
if (remove)
nvme_remove_io_tag_set(ctrl);
nvme_tcp_free_io_queues(ctrl);
}
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
int ret, nr_queues;
@ -2176,9 +2168,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
if (remove)
if (remove) {
nvme_unquiesce_io_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
nvme_remove_io_tag_set(ctrl);
}
nvme_tcp_free_io_queues(ctrl);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
@ -2267,7 +2261,9 @@ destroy_io:
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
nvme_tcp_destroy_io_queues(ctrl, new);
if (new)
nvme_remove_io_tag_set(ctrl);
nvme_tcp_free_io_queues(ctrl);
}
destroy_admin:
nvme_stop_keep_alive(ctrl);
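Net effect of the nvme-tcp hunks: nvme_tcp_destroy_io_queues() is gone, queue memory is freed unconditionally at the end of both the teardown and the destroy_io error path, and the I/O tag set is dropped only when the controller is really going away, after the queues have been unquiesced so blocked requests can drain. The resulting order assembled in one place (a consolidation of the hunks above, not a verbatim copy):

static void teardown_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	nvme_cancel_tagset(ctrl);
	if (remove) {
		/* unquiesce first so blocked submitters can finish ... */
		nvme_unquiesce_io_queues(ctrl);
		/* ... then drop the tag set */
		nvme_remove_io_tag_set(ctrl);
	}
	/* always release the queue memory itself */
	nvme_tcp_free_io_queues(ctrl);
}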

@ -139,7 +139,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
unsigned long idx;
ctrl = req->sq->ctrl;
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
@ -331,9 +331,10 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
u32 count = 0;
if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
xa_for_each(&ctrl->subsys->namespaces, idx, ns)
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->anagrpid == grpid)
desc->nsids[count++] = cpu_to_le32(ns->nsid);
}
}
desc->grpid = cpu_to_le32(grpid);
@ -772,7 +773,7 @@ static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
goto out;
}
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_endgid)
continue;
@ -815,7 +816,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
goto out;
}
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
if (match_css && req->ns->csi != req->cmd->identify.csi)
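Every loop in this file now iterates only namespaces that are actually enabled, so SMART data, ANA groups, endurance-group lists and namespace lists stop reporting disabled namespaces. Judging from the xa_set_mark()/xa_clear_mark() calls in the core.c hunk further down, the new iterator is presumably a thin wrapper of this shape (the real definition lives in nvmet.h):

/* Presumed shape, based on the marks manipulated in core.c: */
#define nvmet_for_each_enabled_ns(xa, idx, ns) \
	xa_for_each_marked(xa, idx, ns, NVMET_NS_ENABLED)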

@ -810,18 +810,6 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
NULL,
};
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
{
struct config_item *ns_item;
char name[12];
snprintf(name, sizeof(name), "%u", nsid);
mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
ns_item = config_group_find_item(&subsys->namespaces_group, name);
mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
return ns_item != NULL;
}
static void nvmet_ns_release(struct config_item *item)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
@ -2254,12 +2242,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
const char *page, size_t count)
{
struct list_head *entry;
char *old_nqn, *new_nqn;
size_t len;
len = strcspn(page, "\n");
if (!len || len > NVMF_NQN_FIELD_LEN - 1)
return -EINVAL;
new_nqn = kstrndup(page, len, GFP_KERNEL);
if (!new_nqn)
return -ENOMEM;
down_write(&nvmet_config_sem);
list_for_each(entry, &nvmet_subsystems_group.cg_children) {
struct config_item *item =
@ -2268,13 +2261,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
if (!strncmp(config_item_name(item), page, len)) {
pr_err("duplicate NQN %s\n", config_item_name(item));
up_write(&nvmet_config_sem);
kfree(new_nqn);
return -EINVAL;
}
}
memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
memcpy(nvmet_disc_subsys->subsysnqn, page, len);
old_nqn = nvmet_disc_subsys->subsysnqn;
nvmet_disc_subsys->subsysnqn = new_nqn;
up_write(&nvmet_config_sem);
kfree(old_nqn);
return len;
}
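The store handler above switches from memcpy() into the live subsysnqn buffer to allocate-then-swap: the new string is built before taking nvmet_config_sem, only a pointer assignment happens under the lock, and the old string is freed after unlock. The same pattern in isolation (a sketch; it assumes readers hold the semaphore for as long as they use the string):

#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/string.h>

static int swap_name(char **slot, const char *src, size_t len,
		     struct rw_semaphore *sem)
{
	char *new_name = kstrndup(src, len, GFP_KERNEL);
	char *old_name;

	if (!new_name)
		return -ENOMEM;

	down_write(sem);
	old_name = *slot;
	*slot = new_name;	/* readers see old or new, never a torn copy */
	up_write(sem);

	kfree(old_name);	/* safe once no reader can hold the old pointer */
	return 0;
}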

@ -127,7 +127,7 @@ static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
unsigned long idx;
u32 nsid = 0;
xa_for_each(&subsys->namespaces, idx, cur)
nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur)
nsid = cur->nsid;
return nsid;
@ -441,11 +441,14 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
req->ns = xa_load(&subsys->namespaces, nsid);
if (unlikely(!req->ns)) {
if (unlikely(!req->ns || !req->ns->enabled)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
if (nvmet_subsys_nsid_exists(subsys, nsid))
return NVME_SC_INTERNAL_PATH_ERROR;
return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
if (!req->ns) /* ns doesn't exist! */
return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
/* ns exists but it's disabled */
req->ns = NULL;
return NVME_SC_INTERNAL_PATH_ERROR;
}
percpu_ref_get(&req->ns->ref);
@ -583,8 +586,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_unlock;
ret = -EMFILE;
if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
goto out_unlock;
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
@ -599,38 +600,19 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
0, GFP_KERNEL);
if (ret)
goto out_dev_put;
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = ns->nsid;
ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
if (ret)
goto out_restore_subsys_maxnsid;
if (ns->pr.enable) {
ret = nvmet_pr_init_ns(ns);
if (ret)
goto out_remove_from_subsys;
goto out_dev_put;
}
subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
out_remove_from_subsys:
xa_erase(&subsys->namespaces, ns->nsid);
out_restore_subsys_maxnsid:
subsys->max_nsid = nvmet_max_nsid(subsys);
percpu_ref_exit(&ns->ref);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@ -649,15 +631,37 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
goto out_unlock;
ns->enabled = false;
xa_erase(&ns->subsys->namespaces, ns->nsid);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
mutex_unlock(&subsys->lock);
if (ns->pr.enable)
nvmet_pr_exit_ns(ns);
mutex_lock(&subsys->lock);
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
out_unlock:
mutex_unlock(&subsys->lock);
}
void nvmet_ns_free(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
nvmet_ns_disable(ns);
mutex_lock(&subsys->lock);
xa_erase(&subsys->namespaces, ns->nsid);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
mutex_unlock(&subsys->lock);
/*
* Now that we removed the namespaces from the lookup list, we
* can kill the per_cpu ref and wait for any remaining references
@ -671,21 +675,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
wait_for_completion(&ns->disable_done);
percpu_ref_exit(&ns->ref);
if (ns->pr.enable)
nvmet_pr_exit_ns(ns);
mutex_lock(&subsys->lock);
subsys->nr_namespaces--;
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
out_unlock:
mutex_unlock(&subsys->lock);
}
void nvmet_ns_free(struct nvmet_ns *ns)
{
nvmet_ns_disable(ns);
down_write(&nvmet_ana_sem);
nvmet_ana_group_enabled[ns->anagrpid]--;
@ -699,15 +691,33 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ns *ns;
mutex_lock(&subsys->lock);
if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
goto out_unlock;
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
if (!ns)
return NULL;
goto out_unlock;
init_completion(&ns->disable_done);
ns->nsid = nsid;
ns->subsys = subsys;
if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
goto out_free;
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = nsid;
if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL))
goto out_exit;
subsys->nr_namespaces++;
mutex_unlock(&subsys->lock);
down_write(&nvmet_ana_sem);
ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
nvmet_ana_group_enabled[ns->anagrpid]++;
@ -718,6 +728,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->csi = NVME_CSI_NVM;
return ns;
out_exit:
subsys->max_nsid = nvmet_max_nsid(subsys);
percpu_ref_exit(&ns->ref);
out_free:
kfree(ns);
out_unlock:
mutex_unlock(&subsys->lock);
return NULL;
}
static void nvmet_update_sq_head(struct nvmet_req *req)
@ -1394,7 +1412,7 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
ctrl->p2p_client = get_device(req->p2p_client);
xa_for_each(&ctrl->subsys->namespaces, idx, ns)
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
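The core.c restructuring moves namespace bookkeeping to allocation time: nvmet_ns_alloc() now inserts into subsys->namespaces (and enforces NVMET_MAX_NAMESPACES), enable/disable only toggle ns->enabled plus an xarray mark, and the entry is erased again in nvmet_ns_free(). That lets nvmet_req_find_ns() tell "absent" apart from "present but disabled" with a plain xa_load(), which is why nvmet_subsys_nsid_exists() could be deleted from configfs.c. The mark-based lifecycle as a self-contained sketch (DEMO_NS_ENABLED is an assumed stand-in for the driver's NVMET_NS_ENABLED mark):

#include <linux/types.h>
#include <linux/xarray.h>

struct demo_ns {
	u32 nsid;
	bool enabled;
};

#define DEMO_NS_ENABLED XA_MARK_1	/* assumed stand-in mark */

static DEFINE_XARRAY(demo_namespaces);

static int demo_ns_alloc(struct demo_ns *ns)
{
	/* in the tree from allocation on, but not yet visible as enabled */
	return xa_insert(&demo_namespaces, ns->nsid, ns, GFP_KERNEL);
}

static void demo_ns_enable(struct demo_ns *ns)
{
	ns->enabled = true;
	xa_set_mark(&demo_namespaces, ns->nsid, DEMO_NS_ENABLED);
}

static void demo_ns_disable(struct demo_ns *ns)
{
	ns->enabled = false;
	xa_clear_mark(&demo_namespaces, ns->nsid, DEMO_NS_ENABLED);
}

static void demo_ns_free(struct demo_ns *ns)
{
	xa_erase(&demo_namespaces, ns->nsid);	/* only now leaves the tree */
}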

@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
*/
id->nsfeat |= 1 << 4;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
id->npwg = lpp0b;
id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
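The npwg change stops reporting the placeholder lpp0b value and instead derives the preferred write granularity from the queue's minimum I/O size, expressed 0's-based in logical blocks. A worked example, assuming a device with 4 KiB io_min and 512-byte logical blocks:

/*
 * bdev_io_min(bdev)             = 4096   (assumed)
 * bdev_logical_block_size(bdev) = 512    (assumed)
 * 4096 / 512                    = 8 logical blocks
 * to0based(8)                   = 7, so NPWG advertises "8 LBAs"
 */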

Some files were not shown because too many files have changed in this diff.