mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-15 02:05:33 +00:00

commit 16106d4894: Merge probes/for-next
2	.mailmap
@@ -435,7 +435,7 @@ Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
-Mathieu Othacehe <m.othacehe@gmail.com> <othacehe@gnu.org>
+Mathieu Othacehe <othacehe@gnu.org> <m.othacehe@gmail.com>
 Mat Martineau <martineau@kernel.org> <mathew.j.martineau@linux.intel.com>
 Mat Martineau <martineau@kernel.org> <mathewm@codeaurora.org>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
@@ -445,8 +445,10 @@ event	code	Key		Notes
 0x1008	0x07	FN+F8		IBM: toggle screen expand
 				Lenovo: configure UltraNav,
 				or toggle screen expand.
-				On newer platforms (2024+)
-				replaced by 0x131f (see below)
+				On 2024 platforms replaced by
+				0x131f (see below) and on newer
+				platforms (2025 +) keycode is
+				replaced by 0x1401 (see below).

 0x1009	0x08	FN+F9	-

@@ -506,9 +508,11 @@ event	code	Key		Notes

 0x1019	0x18	unknown

-0x131f	...	FN+F8	Platform Mode change.
+0x131f	...	FN+F8	Platform Mode change (2024 systems).
+			Implemented in driver.
+
+0x1401	...	FN+F8	Platform Mode change (2025 + systems).
 			Implemented in driver.

 ...	...	...

 0x1020	0x1F	unknown
@@ -436,7 +436,7 @@ AnonHugePmdMapped).
 The number of file transparent huge pages mapped to userspace is available
 by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``.
 To identify what applications are mapping file transparent huge pages, it
-is necessary to read ``/proc/PID/smaps`` and count the FileHugeMapped fields
+is necessary to read ``/proc/PID/smaps`` and count the FilePmdMapped fields
 for each mapping.

 Note that reading the smaps file is expensive and reading it
@@ -90,7 +90,7 @@ properties:
   adi,dsi-lanes:
     description: Number of DSI data lanes connected to the DSI host.
     $ref: /schemas/types.yaml#/definitions/uint32
-    enum: [ 1, 2, 3, 4 ]
+    enum: [ 2, 3, 4 ]

   "#sound-dai-cells":
     const: 0
@@ -51,7 +51,7 @@ properties:
     description: Power supply for AVDD, providing 1.8V.

   cpvdd-supply:
-    description: Power supply for CPVDD, providing 3.5V.
+    description: Power supply for CPVDD, providing 1.8V.

   hp-detect-gpios:
     description:
@@ -22,65 +22,67 @@ definitions:
         doc: unused event
       -
         name: created
-        doc:
-          token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
+        doc: >-
+          A new MPTCP connection has been created. It is the good time to
+          allocate memory and send ADD_ADDR if needed. Depending on the
+          traffic-patterns it can take a long time until the
+          MPTCP_EVENT_ESTABLISHED is sent.
+          Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+          dport, server-side.
       -
         name: established
-        doc:
-          token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
+        doc: >-
+          A MPTCP connection is established (can start new subflows).
+          Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+          dport, server-side.
       -
         name: closed
-        doc:
-          token
+        doc: >-
+          A MPTCP connection has stopped.
+          Attribute: token.
       -
         name: announced
         value: 6
-        doc:
-          token, rem_id, family, daddr4 | daddr6 [, dport]
+        doc: >-
+          A new address has been announced by the peer.
+          Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
       -
         name: removed
-        doc:
-          token, rem_id
+        doc: >-
+          An address has been lost by the peer.
+          Attributes: token, rem_id.
       -
         name: sub-established
         value: 10
-        doc:
-          token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
-          dport, backup, if_idx [, error]
+        doc: >-
+          A new subflow has been established. 'error' should not be set.
+          Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+          daddr6, sport, dport, backup, if_idx [, error].
       -
         name: sub-closed
-        doc:
-          token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
-          dport, backup, if_idx [, error]
+        doc: >-
+          A subflow has been closed. An error (copy of sk_err) could be set if an
+          error has been detected for this subflow.
+          Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+          daddr6, sport, dport, backup, if_idx [, error].
       -
         name: sub-priority
         value: 13
-        doc:
-          token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
-          dport, backup, if_idx [, error]
+        doc: >-
+          The priority of a subflow has changed. 'error' should not be set.
+          Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+          daddr6, sport, dport, backup, if_idx [, error].
       -
         name: listener-created
         value: 15
-        doc:
-          family, sport, saddr4 | saddr6
+        doc: >-
+          A new PM listener is created.
+          Attributes: family, sport, saddr4 | saddr6.
       -
         name: listener-closed
-        doc:
-          family, sport, saddr4 | saddr6
+        doc: >-
+          A PM listener is closed.
+          Attributes: family, sport, saddr4 | saddr6.

 attribute-sets:
   -
@@ -307,7 +309,7 @@ operations:
           - addr
     -
       name: flush-addrs
-      doc: flush addresses
+      doc: Flush addresses
       attribute-set: endpoint
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
@@ -351,7 +353,7 @@ operations:
           - addr-remote
     -
       name: announce
-      doc: announce new sf
+      doc: Announce new address
       attribute-set: attr
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
@@ -362,7 +364,7 @@ operations:
           - token
     -
       name: remove
-      doc: announce removal
+      doc: Announce removal
       attribute-set: attr
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
@@ -373,7 +375,7 @@ operations:
           - loc-id
     -
       name: subflow-create
-      doc: todo
+      doc: Create subflow
      attribute-set: attr
      dont-validate: [ strict ]
      flags: [ uns-admin-perm ]
@@ -385,7 +387,7 @@ operations:
           - addr-remote
     -
       name: subflow-destroy
-      doc: todo
+      doc: Destroy subflow
       attribute-set: attr
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
10	MAINTAINERS
@@ -1797,7 +1797,6 @@ F:	include/uapi/linux/if_arcnet.h

 ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
 M:	Arnd Bergmann <arnd@arndb.de>
-M:	Olof Johansson <olof@lixom.net>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	soc@lists.linux.dev
 S:	Maintained
@@ -3608,6 +3607,7 @@ F:	drivers/phy/qualcomm/phy-ath79-usb.c

 ATHEROS ATH GENERIC UTILITIES
 M:	Kalle Valo <kvalo@kernel.org>
+M:	Jeff Johnson <jjohnson@kernel.org>
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 F:	drivers/net/wireless/ath/*
@@ -14756,7 +14756,7 @@ F:	drivers/memory/mtk-smi.c
 F:	include/soc/mediatek/smi.h

 MEDIATEK SWITCH DRIVER
-M:	Arınç ÜNAL <arinc.unal@arinc9.com>
+M:	Chester A. Unal <chester.a.unal@arinc9.com>
 M:	Daniel Golle <daniel@makrotopia.org>
 M:	DENG Qingfang <dqfext@gmail.com>
 M:	Sean Wang <sean.wang@mediatek.com>
@@ -18460,7 +18460,7 @@ F:	Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
 F:	drivers/pinctrl/mediatek/

 PIN CONTROLLER - MEDIATEK MIPS
-M:	Arınç ÜNAL <arinc.unal@arinc9.com>
+M:	Chester A. Unal <chester.a.unal@arinc9.com>
 M:	Sergio Paracuellos <sergio.paracuellos@gmail.com>
 L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 L:	linux-mips@vger.kernel.org
@@ -19504,7 +19504,7 @@ S:	Maintained
 F:	arch/mips/ralink

 RALINK MT7621 MIPS ARCHITECTURE
-M:	Arınç ÜNAL <arinc.unal@arinc9.com>
+M:	Chester A. Unal <chester.a.unal@arinc9.com>
 M:	Sergio Paracuellos <sergio.paracuellos@gmail.com>
 L:	linux-mips@vger.kernel.org
 S:	Maintained
@@ -20907,6 +20907,8 @@ F:	kernel/sched/
 SCHEDULER - SCHED_EXT
 R:	Tejun Heo <tj@kernel.org>
 R:	David Vernet <void@manifault.com>
+R:	Andrea Righi <arighi@nvidia.com>
+R:	Changwoo Min <changwoo@igalia.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 W:	https://github.com/sched-ext/scx
2	Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -6,6 +6,7 @@ menuconfig ARCH_MXC
	select CLKSRC_IMX_GPT
	select GENERIC_IRQ_CHIP
	select GPIOLIB
+	select PINCTRL
	select PM_OPP if PM
	select SOC_BUS
	select SRAM
@@ -143,11 +143,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
		   " DIV:\t\t%s\n"
		   " BMX:\t\t%s\n"
		   " CDX:\t\t%s\n",
-		   cpuinfo.has_mul ? "yes" : "no",
-		   cpuinfo.has_mulx ? "yes" : "no",
-		   cpuinfo.has_div ? "yes" : "no",
-		   cpuinfo.has_bmx ? "yes" : "no",
-		   cpuinfo.has_cdx ? "yes" : "no");
+		   str_yes_no(cpuinfo.has_mul),
+		   str_yes_no(cpuinfo.has_mulx),
+		   str_yes_no(cpuinfo.has_div),
+		   str_yes_no(cpuinfo.has_bmx),
+		   str_yes_no(cpuinfo.has_cdx));

	seq_printf(m,
		   "Icache:\t\t%ukB, line length: %u\n",
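For reference, str_yes_no() used above is a small helper from <linux/string_choices.h> that maps a boolean to the string "yes" or "no", replacing the repeated open-coded ternaries. A minimal sketch of what the conversion relies on (the real definition lives in include/linux/string_choices.h):

static inline const char *str_yes_no(bool v)
{
	return v ? "yes" : "no";
}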
@@ -464,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
	return VM_FAULT_SIGBUS;
 }

+/*
+ * During mmap() paste address, mapping VMA is saved in VAS window
+ * struct which is used to unmap during migration if the window is
+ * still open. But the user space can remove this mapping with
+ * munmap() before closing the window and the VMA address will
+ * be invalid. Set VAS window VMA to NULL in this function which
+ * is called before VMA free.
+ */
+static void vas_mmap_close(struct vm_area_struct *vma)
+{
+	struct file *fp = vma->vm_file;
+	struct coproc_instance *cp_inst = fp->private_data;
+	struct vas_window *txwin;
+
+	/* Should not happen */
+	if (!cp_inst || !cp_inst->txwin) {
+		pr_err("No attached VAS window for the paste address mmap\n");
+		return;
+	}
+
+	txwin = cp_inst->txwin;
+	/*
+	 * task_ref.vma is set in coproc_mmap() during mmap paste
+	 * address. So it has to be the same VMA that is getting freed.
+	 */
+	if (WARN_ON(txwin->task_ref.vma != vma)) {
+		pr_err("Invalid paste address mmaping\n");
+		return;
+	}
+
+	mutex_lock(&txwin->task_ref.mmap_mutex);
+	txwin->task_ref.vma = NULL;
+	mutex_unlock(&txwin->task_ref.mmap_mutex);
+}
+
 static const struct vm_operations_struct vas_vm_ops = {
+	.close = vas_mmap_close,
	.fault = vas_mmap_fault,
 };

@@ -429,6 +429,16 @@ static struct event_constraint intel_lnc_event_constraints[] = {
	EVENT_CONSTRAINT_END
 };

+static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
+	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
+	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
+	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
+	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
+	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
+	EVENT_EXTRA_END
+};
+
 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
@@ -6422,7 +6432,7 @@ static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
	intel_pmu_init_glc(pmu);
	hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
	hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
-	hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
+	hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
 }

 static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
@@ -2517,6 +2517,7 @@ void __init intel_ds_init(void)
			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
			break;

+		case 6:
		case 5:
			x86_pmu.pebs_ept = 1;
			fallthrough;
@@ -1910,6 +1910,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&adl_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	&gnr_uncore_init),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	&gnr_uncore_init),
+	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	&gnr_uncore_init),
	{},
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
@@ -81,6 +81,34 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)

 static __ro_after_init bool ibt_fatal = true;

+/*
+ * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
+ *
+ * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
+ * the WFE state of the interrupted context needs to be cleared to let execution
+ * continue. Otherwise when the CPU resumes from the instruction that just
+ * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
+ * enters a dead loop.
+ *
+ * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
+ * set WFE. But FRED provides space on the entry stack (in an expanded CS area)
+ * to save and restore the WFE state, thus the WFE state is no longer clobbered,
+ * so software must clear it.
+ */
+static void ibt_clear_fred_wfe(struct pt_regs *regs)
+{
+	/*
+	 * No need to do any FRED checks.
+	 *
+	 * For IDT event delivery, the high-order 48 bits of CS are pushed
+	 * as 0s into the stack, and later IRET ignores these bits.
+	 *
+	 * For FRED, a test to check if fred_cs.wfe is set would be dropped
+	 * by compilers.
+	 */
+	regs->fred_cs.wfe = 0;
+}
+
 static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
 {
	if ((error_code & CP_EC) != CP_ENDBR) {
@@ -90,6 +118,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)

	if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
		regs->ax = 0;
+		ibt_clear_fred_wfe(regs);
		return;
	}

@@ -97,6 +126,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
	if (!ibt_fatal) {
		printk(KERN_DEFAULT CUT_HERE);
		__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
+		ibt_clear_fred_wfe(regs);
		return;
	}
	BUG();
@@ -1618,6 +1618,21 @@ static void ublk_unquiesce_dev(struct ublk_device *ub)
	blk_mq_kick_requeue_list(ub->ub_disk->queue);
 }

+static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
+{
+	struct gendisk *disk;
+
+	/* Sync with ublk_abort_queue() by holding the lock */
+	spin_lock(&ub->lock);
+	disk = ub->ub_disk;
+	ub->dev_info.state = UBLK_S_DEV_DEAD;
+	ub->dev_info.ublksrv_pid = -1;
+	ub->ub_disk = NULL;
+	spin_unlock(&ub->lock);
+
+	return disk;
+}
+
 static void ublk_stop_dev(struct ublk_device *ub)
 {
	struct gendisk *disk;
@@ -1631,14 +1646,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
		ublk_unquiesce_dev(ub);
	}
	del_gendisk(ub->ub_disk);
-
-	/* Sync with ublk_abort_queue() by holding the lock */
-	spin_lock(&ub->lock);
-	disk = ub->ub_disk;
-	ub->dev_info.state = UBLK_S_DEV_DEAD;
-	ub->dev_info.ublksrv_pid = -1;
-	ub->ub_disk = NULL;
-	spin_unlock(&ub->lock);
+	disk = ublk_detach_disk(ub);
	put_disk(disk);
 unlock:
	mutex_unlock(&ub->mutex);
@@ -2336,7 +2344,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)

 out_put_cdev:
	if (ret) {
-		ub->dev_info.state = UBLK_S_DEV_DEAD;
+		ublk_detach_disk(ub);
		ublk_put_device(ub);
	}
	if (ret)
@@ -1106,7 +1106,7 @@ int open_for_data(struct cdrom_device_info *cdi)
		}
	}

-	cd_dbg(CD_OPEN, "all seems well, opening the devicen");
+	cd_dbg(CD_OPEN, "all seems well, opening the device\n");

	/* all seems well, we can open the device */
	ret = cdo->open(cdi, 0); /* open for data */
@@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,

 #else /* !CONFIG_RESET_CONTROLLER */

-static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+							 struct clk_imx8mp_audiomix_priv *priv)
 {
	return 0;
 }
@@ -779,6 +779,13 @@ static struct ccu_div dpu1_clk = {
	},
 };

+static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
+			   &video_pll_clk.common.hw, 4, 1, 0);
+
+static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
+	{ .hw = &emmc_sdio_ref_clk.hw },
+};
+
 static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
 static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
 static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
@@ -798,7 +805,7 @@ static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", p
		0x150, BIT(12), 0);
 static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
 static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", video_pll_clk_pd, 0x204, BIT(30), 0);
+static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
 static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
 static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
 static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
@@ -1059,6 +1066,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
		return ret;
	priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;

+	ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+	if (ret)
+		return ret;
+
	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
	if (ret)
		return ret;
@@ -7,9 +7,9 @@
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
-#include <linux/dma-map-ops.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/amd_qdma.h>
 #include <linux/regmap.h>
@@ -492,18 +492,9 @@ static int qdma_device_verify(struct qdma_device *qdev)

 static int qdma_device_setup(struct qdma_device *qdev)
 {
-	struct device *dev = &qdev->pdev->dev;
	u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
	int ret = 0;

-	while (dev && get_dma_ops(dev))
-		dev = dev->parent;
-	if (!dev) {
-		qdma_err(qdev, "dma device not found");
-		return -EINVAL;
-	}
-	set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
-
	ret = qdma_setup_fmap_context(qdev);
	if (ret) {
		qdma_err(qdev, "Failed setup fmap context");
@@ -548,11 +539,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan)
 {
	struct qdma_queue *queue = to_qdma_queue(chan);
	struct qdma_device *qdev = queue->qdev;
-	struct device *dev = qdev->dma_dev.dev;
+	struct qdma_platdata *pdata;

	qdma_clear_queue_context(queue);
	vchan_free_chan_resources(&queue->vchan);
-	dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+	pdata = dev_get_platdata(&qdev->pdev->dev);
+	dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
			  queue->desc_base, queue->dma_desc_base);
 }

@@ -565,6 +557,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
	struct qdma_queue *queue = to_qdma_queue(chan);
	struct qdma_device *qdev = queue->qdev;
	struct qdma_ctxt_sw_desc desc;
+	struct qdma_platdata *pdata;
	size_t size;
	int ret;

|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pdata = dev_get_platdata(&qdev->pdev->dev);
|
||||
size = queue->ring_size * QDMA_MM_DESC_SIZE;
|
||||
queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
|
||||
queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
|
||||
&queue->dma_desc_base,
|
||||
GFP_KERNEL);
|
||||
if (!queue->desc_base) {
|
||||
@@ -588,7 +582,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
	if (ret) {
		qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
			 chan->name);
-		dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+		dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
				  queue->dma_desc_base);
		return ret;
	}
@@ -948,8 +942,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev)

 static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
 {
-	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+	struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
	struct device *dev = &qdev->pdev->dev;
+	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
	struct qdma_intr_ring *ring;
	struct qdma_ctxt_intr intr_ctxt;
	u32 vector;
@@ -969,7 +964,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
		ring->msix_id = qdev->err_irq_idx + i + 1;
		ring->ridx = i;
		ring->color = 1;
-		ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+		ring->base = dmam_alloc_coherent(pdata->dma_dev,
+						 QDMA_INTR_RING_SIZE,
						 &ring->dev_base, GFP_KERNEL);
		if (!ring->base) {
			qdma_err(qdev, "Failed to alloc intr ring %d", i);
@@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad,
 {
	struct admac_sram *sram;
	int i, ret = 0, nblocks;
+	ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
+	ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);

	if (dir == DMA_MEM_TO_DEV)
		sram = &ad->txcache;
@@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev)
		goto free_irq;
	}

-	ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
-	ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
-
	dev_info(&pdev->dev, "Audio DMA Controller\n");
	dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
		 readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);

	return 0;

@@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		return NULL;

	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
+	if (!desc)
+		return NULL;
	list_add_tail(&desc->desc_node, &desc->descs_list);

	desc->tx_dma_desc.cookie = -EBUSY;
@@ -8,13 +8,15 @@

 static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
 {
	struct dw_dma *dw = to_dw_dma(chan->device);
+	struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev);
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
-		.m_master = 0,
-		.p_master = 1,
+		.m_master = data->m_master,
+		.p_master = data->p_master,
	};

	return dw_dma_filter(chan, &slave);
@@ -51,11 +51,15 @@ struct dw_dma_chip_pdata {
	int (*probe)(struct dw_dma_chip *chip);
	int (*remove)(struct dw_dma_chip *chip);
	struct dw_dma_chip *chip;
+	u8 m_master;
+	u8 p_master;
 };

 static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
	.probe = dw_dma_probe,
	.remove = dw_dma_remove,
+	.m_master = 0,
+	.p_master = 1,
 };

 static const struct dw_dma_platform_data idma32_pdata = {
@@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
	.pdata = &idma32_pdata,
	.probe = idma32_dma_probe,
	.remove = idma32_dma_remove,
+	.m_master = 0,
+	.p_master = 0,
 };

 static const struct dw_dma_platform_data xbar_pdata = {
@@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
	.pdata = &xbar_pdata,
	.probe = idma32_dma_probe,
	.remove = idma32_dma_remove,
+	.m_master = 0,
+	.p_master = 0,
 };

 #endif /* _DMA_DW_INTERNAL_H */
@@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
	if (ret)
		return ret;

-	dw_dma_acpi_controller_register(chip->dw);
-
	pci_set_drvdata(pdev, data);

+	dw_dma_acpi_controller_register(chip->dw);
+
	return 0;
 }

@@ -166,6 +166,7 @@ struct fsl_edma_chan {
	struct work_struct		issue_worker;
	struct platform_device		*pdev;
	struct device			*pd_dev;
+	struct device_link		*pd_dev_link;
	u32				srcid;
	struct clk			*clk;
	int                             priority;
@@ -417,10 +417,33 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

+static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
+{
+	struct fsl_edma_chan *fsl_chan;
+	int i;
+
+	for (i = 0; i < fsl_edma->n_chans; i++) {
+		if (fsl_edma->chan_masked & BIT(i))
+			continue;
+		fsl_chan = &fsl_edma->chans[i];
+		if (fsl_chan->pd_dev_link)
+			device_link_del(fsl_chan->pd_dev_link);
+		if (fsl_chan->pd_dev) {
+			dev_pm_domain_detach(fsl_chan->pd_dev, false);
+			pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
+			pm_runtime_set_suspended(fsl_chan->pd_dev);
+		}
+	}
+}
+
+static void devm_fsl_edma3_detach_pd(void *data)
+{
+	fsl_edma3_detach_pd(data);
+}
+
 static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
	struct fsl_edma_chan *fsl_chan;
-	struct device_link *link;
	struct device *pd_chan;
	struct device *dev;
	int i;
@@ -436,15 +459,16 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
		pd_chan = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR_OR_NULL(pd_chan)) {
			dev_err(dev, "Failed attach pd %d\n", i);
-			return -EINVAL;
+			goto detach;
		}

-		link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+		fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME |
					     DL_FLAG_RPM_ACTIVE);
-		if (!link) {
+		if (!fsl_chan->pd_dev_link) {
			dev_err(dev, "Failed to add device_link to %d\n", i);
-			return -EINVAL;
+			dev_pm_domain_detach(pd_chan, false);
+			goto detach;
		}

		fsl_chan->pd_dev = pd_chan;
@@ -455,6 +479,10 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
	}

	return 0;
+
+detach:
+	fsl_edma3_detach_pd(fsl_edma);
+	return -EINVAL;
 }

 static int fsl_edma_probe(struct platform_device *pdev)
@@ -544,6 +572,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
		ret = fsl_edma3_attach_pd(pdev, fsl_edma);
		if (ret)
			return ret;
+		ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
+		if (ret)
+			return ret;
	}

	if (drvdata->flags & FSL_EDMA_DRV_TCD64)
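For context on the cleanup wiring above: devm_add_action_or_reset() registers a teardown callback that the driver core invokes automatically on later probe failure or driver unbind, and if the registration itself fails it runs the callback immediately and returns the error. A hedged sketch of the pattern as used here (names taken from the hunk; surrounding driver code elided):

	ret = fsl_edma3_attach_pd(pdev, fsl_edma);	/* acquire PM domains */
	if (ret)
		return ret;
	/* devm_fsl_edma3_detach_pd(fsl_edma) now runs on any later probe
	 * failure or on unbind; on registration failure it already ran. */
	ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
	if (ret)
		return ret;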
@@ -31,7 +31,7 @@
 #define LDMA_ASK_VALID		BIT(2)
 #define LDMA_START		BIT(3) /* DMA start operation */
 #define LDMA_STOP		BIT(4) /* DMA stop operation */
-#define LDMA_CONFIG_MASK	GENMASK(4, 0) /* DMA controller config bits mask */
+#define LDMA_CONFIG_MASK	GENMASK_ULL(4, 0) /* DMA controller config bits mask */

 /* Bitfields in ndesc_addr field of HW descriptor */
 #define LDMA_DESC_EN		BIT(0) /*1: The next descriptor is valid */
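The one-line change above is about integer width: GENMASK() has type unsigned long, which is 32 bits on 32-bit kernels, so an expression like "val &= ~LDMA_CONFIG_MASK" on a 64-bit value would zero-extend the 32-bit complement and wipe the upper half; GENMASK_ULL() keeps the mask (and its complement) 64 bits wide. Illustrative comparison (not taken from the driver):

	u64 val = 0xffffffff00000025ULL;	/* hypothetical 64-bit register value */
	val &= ~GENMASK(4, 0);		/* 32-bit kernel: ~mask == 0xffffffe0, clears top half */
	val &= ~GENMASK_ULL(4, 0);	/* ~mask == 0xffffffffffffffe0, top half preserved */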
@@ -1388,6 +1388,7 @@ static int mv_xor_probe(struct platform_device *pdev)
			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
+				of_node_put(np);
				goto err_channel_add;
			}

@@ -1396,6 +1397,7 @@ static int mv_xor_probe(struct platform_device *pdev)
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
+				of_node_put(np);
				goto err_channel_add;
			}

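The two added of_node_put() calls plug a device-node refcount leak: for_each_child_of_node() takes a reference on each child it yields, so any early exit from the walk must drop the current child's reference explicitly. A generic sketch of the rule (do_setup() is a placeholder, not the driver's code):

	for_each_child_of_node(pdev->dev.of_node, np) {
		ret = do_setup(np);
		if (ret) {
			of_node_put(np);	/* drop the ref the iterator holds */
			goto err;
		}
	}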
@@ -231,6 +231,7 @@ struct tegra_dma_channel {
	bool config_init;
	char name[30];
	enum dma_transfer_direction sid_dir;
+	enum dma_status status;
	int id;
	int irq;
	int slave_id;
@@ -393,6 +394,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc)
		tegra_dma_dump_chan_regs(tdc);
	}

+	tdc->status = DMA_PAUSED;
+
	return ret;
 }

@@ -419,6 +422,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
	val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+
+	tdc->status = DMA_IN_PROGRESS;
 }

 static int tegra_dma_device_resume(struct dma_chan *dc)
@@ -544,6 +549,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)

	tegra_dma_sid_free(tdc);
	tdc->dma_desc = NULL;
+	tdc->status = DMA_COMPLETE;
 }

 static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
@@ -716,6 +722,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
		tdc->dma_desc = NULL;
	}

+	tdc->status = DMA_COMPLETE;
	tegra_dma_sid_free(tdc);
	vchan_get_all_descriptors(&tdc->vc, &head);
	spin_unlock_irqrestore(&tdc->vc.lock, flags);
@@ -769,6 +776,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	if (ret == DMA_COMPLETE)
		return ret;

+	if (tdc->status == DMA_PAUSED)
+		ret = DMA_PAUSED;
+
	spin_lock_irqsave(&tdc->vc.lock, flags);
	vd = vchan_find_desc(&tdc->vc, cookie);
	if (vd) {
@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
			   ADV7511_AUDIO_CFG3_LEN_MASK, len);
	regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
			   ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
-	regmap_write(adv7511->regmap, 0x73, 0x1);
+
+	/* send current Audio infoframe values while updating */
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+			   BIT(5), BIT(5));
+
+	regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
+
+	/* use Audio infoframe updated info */
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+			   BIT(5), 0);

	return 0;
 }
@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
	regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
				BIT(7) | BIT(6), BIT(7));
	/* use Audio infoframe updated info */
-	regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
				BIT(5), 0);
+
	/* enable SPDIF receiver */
	if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
		regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
@@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_client *i2c)
		return ret;

	ret = adv7511_init_regulators(adv7511);
-	if (ret)
-		return dev_err_probe(dev, ret, "failed to init regulators\n");
+	if (ret) {
+		dev_err_probe(dev, ret, "failed to init regulators\n");
+		goto err_of_node_put;
+	}

	/*
	 * The power down GPIO is optional. If present, toggle it from active to
@@ -1363,6 +1365,8 @@ err_i2c_unregister_edid:
	i2c_unregister_device(adv7511->i2c_edid);
 uninit_regulators:
	adv7511_uninit_regulators(adv7511);
+err_of_node_put:
+	of_node_put(adv7511->host_node);

	return ret;
 }
@@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_client *i2c)
 {
	struct adv7511 *adv7511 = i2c_get_clientdata(i2c);

+	of_node_put(adv7511->host_node);
+
	adv7511_uninit_regulators(adv7511);

	drm_bridge_remove(&adv7511->bridge);
@@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)

	of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);

-	if (num_lanes < 1 || num_lanes > 4)
+	if (num_lanes < 2 || num_lanes > 4)
		return -EINVAL;

	adv->num_dsi_lanes = num_lanes;
@@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
	if (!adv->host_node)
		return -ENODEV;

-	of_node_put(adv->host_node);
-
	adv->use_timing_gen = !of_property_read_bool(np,
				"adi,disable-timing-generator");

@@ -2115,14 +2115,6 @@ static void intel_c10_pll_program(struct intel_display *display,
		      0, C10_VDR_CTRL_MSGBUS_ACCESS,
		      MB_WRITE_COMMITTED);

-	/* Custom width needs to be programmed to 0 for both the phy lanes */
-	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
-		      C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
-		      MB_WRITE_COMMITTED);
-	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
-		      0, C10_VDR_CTRL_UPDATE_CFG,
-		      MB_WRITE_COMMITTED);
-
	/* Program the pll values only for the master lane */
	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
		intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
@@ -2132,6 +2124,10 @@ static void intel_c10_pll_program(struct intel_display *display,
	intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
	intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);

+	/* Custom width needs to be programmed to 0 for both the phy lanes */
+	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+		      C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+		      MB_WRITE_COMMITTED);
	intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
		      0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
		      MB_WRITE_COMMITTED);
@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
		    GEN9_MEDIA_PG_ENABLE |
		    GEN11_MEDIA_SAMPLER_PG_ENABLE;

-	if (GRAPHICS_VER(gt->i915) >= 12) {
+	if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
		for (i = 0; i < I915_MAX_VCS; i++)
			if (HAS_ENGINE(gt, _VCS(i)))
				pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
@@ -724,7 +724,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
	    new_mem->mem_type == XE_PL_SYSTEM) {
		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
						     DMA_RESV_USAGE_BOOKKEEP,
-						     true,
+						     false,
						     MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			ret = timeout;
@@ -848,8 +848,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,

 out:
	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
-	    ttm_bo->ttm)
+	    ttm_bo->ttm) {
+		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+						     DMA_RESV_USAGE_KERNEL,
+						     false,
+						     MAX_SCHEDULE_TIMEOUT);
+		if (timeout < 0)
+			ret = timeout;
+
		xe_tt_unmap_sg(ttm_bo->ttm);
+	}

	return ret;
 }
@@ -109,7 +109,11 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
	drm_puts(&p, "\n**** GuC CT ****\n");
	xe_guc_ct_snapshot_print(ss->guc.ct, &p);

-	drm_puts(&p, "\n**** Contexts ****\n");
+	/*
+	 * Don't add a new section header here because the mesa debug decoder
+	 * tool expects the context information to be in the 'GuC CT' section.
+	 */
+	/* drm_puts(&p, "\n**** Contexts ****\n"); */
	xe_guc_exec_queue_snapshot_print(ss->ge, &p);

	drm_puts(&p, "\n**** Job ****\n");
@@ -363,6 +367,15 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
	char buff[ASCII85_BUFSZ], *line_buff;
	size_t line_pos = 0;

+	/*
+	 * Splitting blobs across multiple lines is not compatible with the mesa
+	 * debug decoder tool. Note that even dropping the explicit '\n' below
+	 * doesn't help because the GuC log is so big some underlying implementation
+	 * still splits the lines at 512K characters. So just bail completely for
+	 * the moment.
+	 */
+	return;
+
 #define DMESG_MAX_LINE_LEN	800
 #define MIN_SPACE		(ASCII85_BUFSZ + 2)	/* 85 + "\n\0" */

@@ -8,6 +8,7 @@
 #include <linux/nospec.h>

 #include <drm/drm_device.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <uapi/drm/xe_drm.h>

@@ -762,9 +763,11 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
  */
 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 {
+	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_file *xef;
	struct xe_lrc *lrc;
	u32 old_ts, new_ts;
+	int idx;

	/*
	 * Jobs that are run during driver load may use an exec_queue, but are
@@ -774,6 +777,10 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
	if (!q->vm || !q->vm->xef)
		return;

+	/* Synchronize with unbind while holding the xe file open */
+	if (!drm_dev_enter(&xe->drm, &idx))
+		return;
+
	xef = q->vm->xef;

	/*
@@ -787,6 +794,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+
+	drm_dev_exit(idx);
 }

 /**
@@ -2046,7 +2046,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
	valid_any = valid_any || (valid_ggtt && is_primary);

	if (IS_DGFX(xe)) {
-		bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
+		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);

		valid_any = valid_any || (valid_lmem && is_primary);
		valid_all = valid_all && valid_lmem;
@@ -74,12 +74,6 @@ struct xe_oa_config {
	struct rcu_head rcu;
 };

-struct flex {
-	struct xe_reg reg;
-	u32 offset;
-	u32 value;
-};
-
 struct xe_oa_open_param {
	struct xe_file *xef;
	u32 oa_unit_id;
@@ -596,19 +590,38 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
	return ret;
 }

+static void xe_oa_lock_vma(struct xe_exec_queue *q)
+{
+	if (q->vm) {
+		down_read(&q->vm->lock);
+		xe_vm_lock(q->vm, false);
+	}
+}
+
+static void xe_oa_unlock_vma(struct xe_exec_queue *q)
+{
+	if (q->vm) {
+		xe_vm_unlock(q->vm);
+		up_read(&q->vm->lock);
+	}
+}
+
 static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
					 struct xe_bb *bb)
 {
+	struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
	struct xe_sched_job *job;
	struct dma_fence *fence;
	int err = 0;

-	/* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
-	job = xe_bb_create_job(stream->k_exec_q, bb);
+	xe_oa_lock_vma(q);
+
+	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto exit;
	}
+	job->ggtt = true;

	if (deps == XE_OA_SUBMIT_ADD_DEPS) {
		for (int i = 0; i < stream->num_syncs && !err; i++)
@@ -623,10 +636,13 @@ static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

+	xe_oa_unlock_vma(q);
+
	return fence;
 err_put_job:
	xe_sched_job_put(job);
 exit:
+	xe_oa_unlock_vma(q);
	return ERR_PTR(err);
 }

@@ -675,63 +691,19 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
	dma_fence_put(stream->last_fence);
 }

-static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
-			     struct xe_bb *bb, const struct flex *flex, u32 count)
-{
-	u32 offset = xe_bo_ggtt_addr(lrc->bo);
-
-	do {
-		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
-		bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
-		bb->cs[bb->len++] = 0;
-		bb->cs[bb->len++] = flex->value;
-
-	} while (flex++, --count);
-}
-
-static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
-				  const struct flex *flex, u32 count)
-{
-	struct dma_fence *fence;
-	struct xe_bb *bb;
-	int err;
-
-	bb = xe_bb_new(stream->gt, 4 * count, false);
-	if (IS_ERR(bb)) {
-		err = PTR_ERR(bb);
-		goto exit;
-	}
-
-	xe_oa_store_flex(stream, lrc, bb, flex, count);
-
-	fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
-	if (IS_ERR(fence)) {
-		err = PTR_ERR(fence);
-		goto free_bb;
-	}
-	xe_bb_free(bb, fence);
-	dma_fence_put(fence);
-
-	return 0;
-free_bb:
-	xe_bb_free(bb, NULL);
-exit:
-	return err;
-}
-
-static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
+static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
 {
	struct dma_fence *fence;
	struct xe_bb *bb;
	int err;

-	bb = xe_bb_new(stream->gt, 3, false);
+	bb = xe_bb_new(stream->gt, 2 * count + 1, false);
	if (IS_ERR(bb)) {
		err = PTR_ERR(bb);
		goto exit;
	}

-	write_cs_mi_lri(bb, reg_lri, 1);
+	write_cs_mi_lri(bb, reg_lri, count);

	fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
	if (IS_ERR(fence)) {
@@ -751,71 +723,55 @@ exit:
 static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
 {
	const struct xe_oa_format *format = stream->oa_buffer.format;
-	struct xe_lrc *lrc = stream->exec_q->lrc[0];
-	u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
	u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
		(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
-
-	struct flex regs_context[] = {
+	struct xe_oa_reg reg_lri[] = {
		{
			OACTXCONTROL(stream->hwe->mmio_base),
-			stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
			enable ? OA_COUNTER_RESUME : 0,
		},
		{
+			OAR_OACONTROL,
+			oacontrol,
+		},
+		{
			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
-			regs_offset + CTX_CONTEXT_CONTROL,
-			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
+			_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+				      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
		},
	};
-	struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
-	int err;
-
-	/* Modify stream hwe context image with regs_context */
-	err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
-				     regs_context, ARRAY_SIZE(regs_context));
-	if (err)
-		return err;

-	/* Apply reg_lri using LRI */
-	return xe_oa_load_with_lri(stream, &reg_lri);
+	return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
 }

 static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
 {
	const struct xe_oa_format *format = stream->oa_buffer.format;
-	struct xe_lrc *lrc = stream->exec_q->lrc[0];
-	u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
	u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
		(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
-	struct flex regs_context[] = {
+	struct xe_oa_reg reg_lri[] = {
		{
			OACTXCONTROL(stream->hwe->mmio_base),
-			stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
			enable ? OA_COUNTER_RESUME : 0,
		},
		{
+			OAC_OACONTROL,
+			oacontrol
+		},
+		{
			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
-			regs_offset + CTX_CONTEXT_CONTROL,
-			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
+			_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+				      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
			_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
		},
	};
-	struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
-	int err;

	/* Set ccs select to enable programming of OAC_OACONTROL */
	xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
			__oa_ccs_select(stream));

-	/* Modify stream hwe context image with regs_context */
-	err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
-				     regs_context, ARRAY_SIZE(regs_context));
-	if (err)
-		return err;
-
-	/* Apply reg_lri using LRI */
-	return xe_oa_load_with_lri(stream, &reg_lri);
+	return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
 }

 static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
@@ -2066,8 +2022,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
		if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
			return -ENOENT;

-		if (param.exec_q->width > 1)
-			drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
+		if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
+			return -EOPNOTSUPP;
	}

	/*
@@ -221,7 +221,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,

 static u32 get_ppgtt_flag(struct xe_sched_job *job)
 {
-	return job->q->vm ? BIT(8) : 0;
+	if (job->q->vm && !job->ggtt)
+		return BIT(8);
+
+	return 0;
 }

 static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
@@ -56,6 +56,8 @@ struct xe_sched_job {
	u32 migrate_flush_flags;
	/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
	bool ring_ops_flush_tlb;
+	/** @ggtt: mapped in ggtt. */
+	bool ggtt;
	/** @ptrs: per instance pointers. */
	struct xe_job_ptrs ptrs[];
 };
@@ -335,6 +335,7 @@ static const struct of_device_id i2c_imx_dt_ids[] = {
	{ .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
	{ .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
	{ .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+	{ .compatible = "fsl,imx7d-i2c", .data = &imx6_i2c_hwdata, },
	{ .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
	{ .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
	{ .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
|
||||
|
||||
static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
|
||||
{
|
||||
bool multi_master = i2c_imx->multi_master;
|
||||
unsigned long orig_jiffies = jiffies;
|
||||
unsigned int temp;
|
||||
|
||||
if (!i2c_imx->multi_master)
|
||||
return 0;
|
||||
|
||||
while (1) {
|
||||
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
|
||||
|
||||
/* check for arbitration lost */
|
||||
if (temp & I2SR_IAL) {
|
||||
if (multi_master && (temp & I2SR_IAL)) {
|
||||
i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
if (for_busy && (temp & I2SR_IBB)) {
|
||||
if (for_busy && (!multi_master || (temp & I2SR_IBB))) {
|
||||
i2c_imx->stopped = 0;
|
||||
break;
|
||||
}
|
||||
|
@@ -93,27 +93,35 @@
  * @base: pointer to register struct
  * @dev: device reference
  * @i2c_clk: clock reference for i2c input clock
+ * @msg_queue: pointer to the messages requiring sending
  * @buf: pointer to msg buffer for easier use
  * @msg_complete: xfer completion object
  * @adapter: core i2c abstraction
  * @msg_err: error code for completed message
  * @bus_clk_rate: current i2c bus clock rate
  * @isr_status: cached copy of local ISR status
+ * @total_num: total number of messages to be sent/received
+ * @current_num: index of the current message being sent/received
  * @msg_len: number of bytes transferred in msg
  * @addr: address of the current slave
+ * @restart_needed: whether or not a repeated start is required after current message
  */
 struct mchp_corei2c_dev {
	void __iomem *base;
	struct device *dev;
	struct clk *i2c_clk;
+	struct i2c_msg *msg_queue;
	u8 *buf;
	struct completion msg_complete;
	struct i2c_adapter adapter;
	int msg_err;
+	int total_num;
+	int current_num;
	u32 bus_clk_rate;
	u32 isr_status;
	u16 msg_len;
	u8 addr;
+	bool restart_needed;
 };

 static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev)
@@ -222,6 +230,47 @@ static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev)
	return 0;
 }

+static void mchp_corei2c_next_msg(struct mchp_corei2c_dev *idev)
+{
+	struct i2c_msg *this_msg;
+	u8 ctrl;
+
+	if (idev->current_num >= idev->total_num) {
+		complete(&idev->msg_complete);
+		return;
+	}
+
+	/*
+	 * If there's been an error, the isr needs to return control
+	 * to the "main" part of the driver, so as not to keep sending
+	 * messages once it completes and clears the SI bit.
+	 */
+	if (idev->msg_err) {
+		complete(&idev->msg_complete);
+		return;
+	}
+
+	this_msg = idev->msg_queue++;
+
+	if (idev->current_num < (idev->total_num - 1)) {
+		struct i2c_msg *next_msg = idev->msg_queue;
+
+		idev->restart_needed = next_msg->flags & I2C_M_RD;
+	} else {
+		idev->restart_needed = false;
+	}
+
+	idev->addr = i2c_8bit_addr_from_msg(this_msg);
+	idev->msg_len = this_msg->len;
+	idev->buf = this_msg->buf;
+
+	ctrl = readb(idev->base + CORE_I2C_CTRL);
+	ctrl |= CTRL_STA;
+	writeb(ctrl, idev->base + CORE_I2C_CTRL);
+
+	idev->current_num++;
+}
+
 static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
 {
	u32 status = idev->isr_status;
@@ -238,8 +287,6 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
		ctrl &= ~CTRL_STA;
		writeb(idev->addr, idev->base + CORE_I2C_DATA);
		writeb(ctrl, idev->base + CORE_I2C_CTRL);
-		if (idev->msg_len == 0)
-			finished = true;
		break;
	case STATUS_M_ARB_LOST:
		idev->msg_err = -EAGAIN;
@@ -247,10 +294,14 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
		break;
	case STATUS_M_SLAW_ACK:
	case STATUS_M_TX_DATA_ACK:
-		if (idev->msg_len > 0)
+		if (idev->msg_len > 0) {
			mchp_corei2c_fill_tx(idev);
-		else
-			last_byte = true;
+		} else {
+			if (idev->restart_needed)
+				finished = true;
+			else
+				last_byte = true;
+		}
		break;
	case STATUS_M_TX_DATA_NACK:
	case STATUS_M_SLAR_NACK:
@@ -287,7 +338,7 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
		mchp_corei2c_stop(idev);

	if (last_byte || finished)
-		complete(&idev->msg_complete);
+		mchp_corei2c_next_msg(idev);

	return IRQ_HANDLED;
 }
|
||||
@ -311,21 +362,48 @@ static irqreturn_t mchp_corei2c_isr(int irq, void *_dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
|
||||
struct i2c_msg *msg)
|
||||
static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
|
||||
int num)
|
||||
{
|
||||
u8 ctrl;
|
||||
struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
|
||||
struct i2c_msg *this_msg = msgs;
|
||||
unsigned long time_left;
|
||||
|
||||
idev->addr = i2c_8bit_addr_from_msg(msg);
|
||||
idev->msg_len = msg->len;
|
||||
idev->buf = msg->buf;
|
||||
idev->msg_err = 0;
|
||||
|
||||
reinit_completion(&idev->msg_complete);
|
||||
u8 ctrl;
|
||||
|
||||
mchp_corei2c_core_enable(idev);
|
||||
|
||||
/*
|
||||
* The isr controls the flow of a transfer, this info needs to be saved
|
||||
* to a location that it can access the queue information from.
|
||||
*/
|
||||
idev->restart_needed = false;
|
||||
idev->msg_queue = msgs;
|
||||
idev->total_num = num;
|
||||
idev->current_num = 0;
|
||||
|
||||
/*
|
||||
* But the first entry to the isr is triggered by the start in this
|
||||
* function, so the first message needs to be "dequeued".
|
||||
*/
|
||||
idev->addr = i2c_8bit_addr_from_msg(this_msg);
|
||||
idev->msg_len = this_msg->len;
|
||||
idev->buf = this_msg->buf;
|
||||
idev->msg_err = 0;
|
||||
|
||||
if (idev->total_num > 1) {
|
||||
struct i2c_msg *next_msg = msgs + 1;
|
||||
|
||||
idev->restart_needed = next_msg->flags & I2C_M_RD;
|
||||
}
|
||||
|
||||
idev->current_num++;
|
||||
idev->msg_queue++;
|
||||
|
||||
reinit_completion(&idev->msg_complete);
|
||||
|
||||
/*
|
||||
* Send the first start to pass control to the isr
|
||||
*/
|
||||
ctrl = readb(idev->base + CORE_I2C_CTRL);
|
||||
ctrl |= CTRL_STA;
|
||||
writeb(ctrl, idev->base + CORE_I2C_CTRL);
|
||||
@ -335,20 +413,8 @@ static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
|
||||
if (!time_left)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
if (idev->msg_err)
|
||||
return idev->msg_err;
|
||||
}
|
||||
|
||||
static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
|
||||
int num)
|
||||
{
|
||||
struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
ret = mchp_corei2c_xfer_msg(idev, msgs++);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return num;
|
||||
}
|
||||
|
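The two comments above describe a hand-off: mchp_corei2c_xfer() primes the first message and fires a START, after which the interrupt handler walks the remaining queue via the state saved in idev. A minimal, self-contained userspace sketch of that producer/ISR hand-off (all names illustrative, not the driver's):

/* Toy model of the queue hand-off: the caller "dequeues" the first
 * message and issues the start; a simulated interrupt handler pulls
 * the rest from the saved queue state. */
#include <stdio.h>

struct msg { const char *buf; int len; };

struct dev {
	struct msg *queue;   /* remaining messages, saved for the ISR */
	int total, current;  /* queue bookkeeping, as in the driver */
	struct msg *active;  /* message currently on the wire */
};

static void start_next(struct dev *d)
{
	d->active = d->queue++;  /* dequeue one message */
	d->current++;
	printf("start msg %d: %s\n", d->current, d->active->buf);
}

/* stands in for the interrupt handler completing one message */
static void isr(struct dev *d)
{
	if (d->current < d->total)
		start_next(d);   /* repeated start for the next message */
}

int main(void)
{
	struct msg msgs[] = { {"w:0x10", 6}, {"r:2", 3}, {"w:0x00", 6} };
	struct dev d = { .queue = msgs, .total = 3, .current = 0 };

	start_next(&d);          /* first "dequeue" happens in the caller */
	while (d.current < d.total)
		isr(&d);
	return 0;
}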
@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
int bound_if_index = dev_addr->bound_dev_if;
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
struct net_device *pdev = NULL;

if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
goto out;
@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,

rcu_read_lock();
ndev = rcu_dereference(sgid_attr->ndev);
if (ndev->ifindex != bound_if_index) {
pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
if (pdev) {
if (is_vlan_dev(pdev)) {
pdev = vlan_dev_real_dev(pdev);
if (ndev->ifindex == pdev->ifindex)
bound_if_index = pdev->ifindex;
}
if (is_vlan_dev(ndev)) {
pdev = vlan_dev_real_dev(ndev);
if (bound_if_index == pdev->ifindex)
bound_if_index = ndev->ifindex;
}
}
}
if (!net_eq(dev_net(ndev), dev_addr->net) ||
ndev->ifindex != bound_if_index) {
rdma_put_gid_attr(sgid_attr);

@ -2833,8 +2833,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
enum rdma_nl_notify_event_type type)
{
struct sk_buff *skb;
int ret = -EMSGSIZE;
struct net *net;
int ret = 0;
void *nlh;

net = read_pnet(&device->coredev.rdma_net);
@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
{
const void __user *res = iter->cur;

if (iter->cur + len > iter->end)
if (len > iter->end - iter->cur)
return (void __force __user *)ERR_PTR(-ENOSPC);
iter->cur += len;
return res;
@ -2008,11 +2008,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
if (ret)
return ret;
wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
cmd.wr_count));
if (IS_ERR(wqes))
return PTR_ERR(wqes);
sgls = uverbs_request_next_ptr(
&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
sgls = uverbs_request_next_ptr(&iter,
size_mul(cmd.sge_count,
sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return PTR_ERR(sgls);
ret = uverbs_request_finish(&iter);
@ -2198,11 +2200,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
return ERR_PTR(-EINVAL);

wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
if (IS_ERR(wqes))
return ERR_CAST(wqes);
sgls = uverbs_request_next_ptr(
iter, sge_count * sizeof(struct ib_uverbs_sge));
sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return ERR_CAST(sgls);
ret = uverbs_request_finish(iter);
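Both uverbs changes above defend against arithmetic overflow on user-controlled sizes: the bounds test is rewritten to subtract rather than add, so it can no longer wrap past iter->end, and products of user-supplied counts go through size_mul(), which saturates on overflow so the following bounds check fails cleanly. A rough userspace sketch of the same two patterns (size_mul_sketch() is modeled on the kernel's size_mul() from <linux/overflow.h>; the buffer and sizes are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t size_mul_sketch(size_t a, size_t b)
{
	size_t prod;

	if (__builtin_mul_overflow(a, b, &prod))
		return SIZE_MAX;   /* saturate: later bounds check fails */
	return prod;
}

/* Safe: compares len against the remaining bytes, so nothing wraps. */
static int in_bounds(const char *cur, const char *end, size_t len)
{
	return len <= (size_t)(end - cur);
}

int main(void)
{
	char buf[64];
	const char *cur = buf, *end = buf + sizeof(buf);

	/* 0x10000 * 0x10000 wraps to 0 with a 32-bit size_t; the
	 * saturating multiply keeps the bounds check honest. */
	size_t need = size_mul_sketch((size_t)1 << 16, (size_t)1 << 16);

	printf("need=%zu fits=%d\n", need, in_bounds(cur, end, need));
	return 0;
}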
@ -199,7 +199,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,

ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
ib_attr->hw_ver = rdev->en_dev->pdev->revision;
ib_attr->max_qp = dev_attr->max_qp;
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
ib_attr->device_cap_flags =
@ -967,13 +967,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;

bnxt_re_debug_rem_qpinfo(rdev, qp);

bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
if (rc)
ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
return rc;
}

if (rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
@ -983,11 +983,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)

bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
rc = bnxt_re_destroy_gsi_sqp(qp);
if (rc)
return rc;
}
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
bnxt_re_destroy_gsi_sqp(qp);

mutex_lock(&rdev->qp_lock);
list_del(&qp->list);
@ -998,8 +995,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);

bnxt_re_debug_rem_qpinfo(rdev, qp);

ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);

@ -2167,18 +2162,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
}

if (qp_attr->qp_state == IB_QPS_RTR) {
enum ib_mtu qpmtu;

qpmtu = iboe_get_mtu(rdev->netdev->mtu);
if (qp_attr_mask & IB_QP_PATH_MTU) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
} else if (qp_attr->qp_state == IB_QPS_RTR) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu =
__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
qp->qplib_qp.mtu =
ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
ib_mtu_enum_to_int(qpmtu))
return -EINVAL;
qpmtu = qp_attr->path_mtu;
}

qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
}

if (qp_attr_mask & IB_QP_TIMEOUT) {
@ -2328,6 +2325,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = qplib_qp->retry_cnt;
qp_attr->rnr_retry = qplib_qp->rnr_retry;
qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
qp_attr->rq_psn = qplib_qp->rq.psn;
qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
qp_attr->sq_psn = qplib_qp->sq.psn;
@ -2824,6 +2822,7 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
@ -2936,6 +2935,7 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);

@ -268,6 +268,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);

static inline u32 __to_ib_port_num(u16 port_id)
{
return (u32)port_id + 1;
}

unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);

@ -1715,11 +1715,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
int mask = IB_QP_STATE;
struct ib_qp_attr qp_attr;
struct bnxt_re_qp *qp;

qp_attr.qp_state = IB_QPS_ERR;
mutex_lock(&rdev->qp_lock);
list_for_each_entry(qp, &rdev->qp_list, list) {
/* Modify the state of all QPs except QP1/Shadow QP */
@ -1727,12 +1724,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
if (qp->qplib_qp.state !=
CMDQ_MODIFY_QP_NEW_STATE_RESET &&
qp->qplib_qp.state !=
CMDQ_MODIFY_QP_NEW_STATE_ERR) {
CMDQ_MODIFY_QP_NEW_STATE_ERR)
bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
1, IB_EVENT_QP_FATAL);
bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
NULL);
}
}
}
mutex_unlock(&rdev->qp_lock);

@ -659,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
return rc;

srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
GFP_KERNEL);
if (!srq->swq) {
rc = -ENOMEM;
goto fail;
}
srq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_SRQ,
@ -694,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
spin_lock_init(&srq->lock);
srq->start_idx = 0;
srq->last_idx = srq->hwq.max_elements - 1;
if (!srq->hwq.is_user) {
srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
GFP_KERNEL);
if (!srq->swq) {
rc = -ENOMEM;
goto fail;
}
for (idx = 0; idx < srq->hwq.max_elements; idx++)
srq->swq[idx].next_idx = idx + 1;
srq->swq[srq->last_idx].next_idx = -1;
}

srq->id = le32_to_cpu(resp.xid);
srq->dbinfo.hwq = &srq->hwq;
@ -1000,9 +1001,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 tbl_indx;
u16 nsge;

if (res->dattr)
qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);

sq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_QP,
@ -1034,7 +1033,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
: 0;
/* Update msn tbl size */
if (qp->is_host_msn_tbl && psn_sz) {
hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
hwq_attr.aux_depth =
roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
else
hwq_attr.aux_depth =
roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
qp->msn_tbl_sz = hwq_attr.aux_depth;
qp->msn = 0;
}
@ -1044,13 +1048,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (rc)
return rc;

if (!sq->hwq.is_user) {
rc = bnxt_qplib_alloc_init_swq(sq);
if (rc)
goto fail_sq;

if (psn_sz)
bnxt_qplib_init_psn_ptr(qp, psn_sz);

}
req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@ -1076,9 +1081,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto sq_swq;
if (!rq->hwq.is_user) {
rc = bnxt_qplib_alloc_init_swq(rq);
if (rc)
goto fail_rq;
}

req.rq_size = cpu_to_le32(rq->max_wqe);
pbl = &rq->hwq.pbl[PBL_LVL_0];
@ -1174,9 +1181,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
spin_unlock_bh(&rcfw->tbl_lock);

return 0;
fail:
@ -1283,7 +1292,8 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
}
}

static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp,
struct cmdq_modify_qp *req)
{
u32 mandatory_flags = 0;
@ -1298,6 +1308,14 @@ static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
}

if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
(qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
mandatory_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
}

if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
@ -1338,7 +1356,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
is_optimized_state_transition(qp))
bnxt_set_mandatory_attributes(qp, &req);
bnxt_set_mandatory_attributes(res, qp, &req);
}
bmask = qp->modify_flags;
req.modify_mask = cpu_to_le32(qp->modify_flags);
@ -1521,6 +1539,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
qp->port_id = le16_to_cpu(sb->port_id);
bail:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
@ -2667,11 +2686,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
bnxt_qplib_add_flush_qp(qp);
} else {
/* Before we complete, do WA 9060 */
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
cqe_sq_cons)) {
*lib_qp = qp;
goto out;
}
}
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
cqe->status = CQ_REQ_STATUS_OK;
cqe++;

@ -114,7 +114,6 @@ struct bnxt_qplib_sge {
u32 size;
};

#define BNXT_QPLIB_QP_MAX_SGL 6
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@ -154,7 +153,7 @@ struct bnxt_qplib_swqe {
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
int num_sge;
/* Max inline data is 96 bytes */
u32 inline_len;
@ -299,6 +298,7 @@ struct bnxt_qplib_qp {
u32 dest_qpn;
u8 smac[6];
u16 vlan_id;
u16 port_id;
u8 nw_type;
struct bnxt_qplib_ah ah;

@ -424,7 +424,8 @@ static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,

/* Prevent posting if f/w is not in a state to process */
if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
return bnxt_qplib_map_rc(opcode);
return -ENXIO;

if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
return -ETIMEDOUT;

@ -493,7 +494,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,

rc = __send_message_basic_sanity(rcfw, msg, opcode);
if (rc)
return rc;
return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;

rc = __send_message(rcfw, msg, opcode);
if (rc)

@ -584,6 +584,11 @@ static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
}

static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
{
return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
}

static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
{
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;

@ -129,12 +129,18 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_qp_init_rd_atom =
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
/*
* 128 WQEs needs to be reserved for the HW (8916). Prevent
* reporting the max number
* reporting the max number on legacy devices
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
}

/* Adjust for max_qp_wqes for variable wqe */
if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;

attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;

@ -2215,6 +2215,7 @@ struct creq_query_func_resp_sb {
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
#define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;
@ -931,6 +931,7 @@ struct hns_roce_hem_item {
size_t count; /* max ba numbers */
int start; /* start buf offset in this hem */
int end; /* end buf offset in this hem */
bool exist_bt;
};

/* All HEM items are linked in a tree structure */
@ -959,6 +960,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
}

hem->exist_bt = exist_bt;
hem->count = count;
hem->start = start;
hem->end = end;
@ -969,22 +971,22 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}

static void hem_list_free_item(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_item *hem, bool exist_bt)
struct hns_roce_hem_item *hem)
{
if (exist_bt)
if (hem->exist_bt)
dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
hem->addr, hem->dma_addr);
kfree(hem);
}

static void hem_list_free_all(struct hns_roce_dev *hr_dev,
struct list_head *head, bool exist_bt)
struct list_head *head)
{
struct hns_roce_hem_item *hem, *temp_hem;

list_for_each_entry_safe(hem, temp_hem, head, list) {
list_del(&hem->list);
hem_list_free_item(hr_dev, hem, exist_bt);
hem_list_free_item(hr_dev, hem);
}
}

@ -1084,6 +1086,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,

for (i = 0; i < region_cnt; i++) {
r = (struct hns_roce_buf_region *)&regions[i];
/* when r->hopnum = 0, the region should not occupy root_ba. */
if (!r->hopnum)
continue;

if (r->hopnum > 1) {
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step > 0)
@ -1177,7 +1183,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,

err_exit:
for (level = 1; level < hopnum; level++)
hem_list_free_all(hr_dev, &temp_list[level], true);
hem_list_free_all(hr_dev, &temp_list[level]);

return ret;
}
@ -1218,16 +1224,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
{
struct hns_roce_hem_item *hem;

/* This is on the has_mtt branch, if r->hopnum
* is 0, there is no root_ba to reuse for the
* region's fake hem, so a dma_alloc request is
* necessary here.
*/
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
r->count, false);
r->count, !r->hopnum);
if (!hem)
return -ENOMEM;

/* The root_ba can be reused only when r->hopnum > 0. */
if (r->hopnum)
hem_list_assign_bt(hem, cpu_base, phy_base);
list_add(&hem->list, branch_head);
list_add(&hem->sibling, leaf_head);

return r->count;
/* If r->hopnum == 0, 0 is returned,
* so that the root_bt entry is not occupied.
*/
return r->hopnum ? r->count : 0;
}

static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
@ -1271,7 +1287,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
return -ENOMEM;

total = 0;
for (i = 0; i < region_cnt && total < max_ba_num; i++) {
for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@ -1337,9 +1353,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
region_cnt);
if (ret) {
for (i = 0; i < region_cnt; i++)
hem_list_free_all(hr_dev, &head.branch[i], false);
hem_list_free_all(hr_dev, &head.branch[i]);

hem_list_free_all(hr_dev, &head.root, true);
hem_list_free_all(hr_dev, &head.root);
}

return ret;
@ -1402,10 +1418,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,

for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
j != 0);
hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);

hem_list_free_all(hr_dev, &hem_list->root_bt, true);
hem_list_free_all(hr_dev, &hem_list->root_bt);
INIT_LIST_HEAD(&hem_list->btm_bt);
hem_list->root_ba = 0;
}

@ -468,7 +468,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
valid_num_sge = calc_wr_sge_num(wr, &msg_len);

ret = set_ud_opcode(ud_sq_wqe, wr);
if (WARN_ON(ret))
if (WARN_ON_ONCE(ret))
return ret;

ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
@ -572,7 +572,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
if (WARN_ON(ret))
if (WARN_ON_ONCE(ret))
return ret;

hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
@ -670,6 +670,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;

if (unlikely(qp->state == IB_QPS_ERR)) {
flush_cqe(hr_dev, qp);
return;
}
/* All kinds of DirectWQE have the same header field layout */
hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
@ -5619,6 +5623,9 @@ static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
{
struct hns_roce_dip *hr_dip = hr_qp->dip;

if (!hr_dip)
return;

xa_lock(&hr_dev->qp_table.dip_xa);

hr_dip->qp_cnt--;

@ -814,11 +814,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
mapped_cnt < page_cnt; i++) {
r = &mtr->hem_cfg.region[i];
/* if hopnum is 0, no need to map pages in this region */
if (!r->hopnum) {
mapped_cnt += r->count;
continue;
}

if (r->offset + r->count > page_cnt) {
ret = -EINVAL;
@ -2839,7 +2839,7 @@ static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
int err;

*num_plane = 0;
if (!MLX5_CAP_GEN(mdev, ib_virt))
if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
return 0;

err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
@ -3639,7 +3639,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
list) {
if (dev->sys_image_guid == mpi->sys_image_guid &&
(mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
(mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
bound = mlx5_ib_bind_slave_port(dev, mpi);
}

@ -4785,7 +4786,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,

mutex_lock(&mlx5_ib_multiport_mutex);
list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
if (dev->sys_image_guid == mpi->sys_image_guid)
if (dev->sys_image_guid == mpi->sys_image_guid &&
mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
bound = mlx5_ib_bind_slave_port(dev, mpi);

if (bound) {
@ -40,6 +40,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe)
{
struct net_device *ndev;

rxe->max_inline_data = RXE_MAX_INLINE_DATA;

rxe->attr.vendor_id = RXE_VENDOR_ID;
@ -71,8 +73,15 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;

ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return;

addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
rxe->ndev->dev_addr);
ndev->dev_addr);

dev_put(ndev);

rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
@ -109,10 +118,15 @@ static void rxe_init_port_param(struct rxe_port *port)
static void rxe_init_ports(struct rxe_dev *rxe)
{
struct rxe_port *port = &rxe->port;
struct net_device *ndev;

rxe_init_port_param(port);
ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return;
addrconf_addr_eui48((unsigned char *)&port->port_guid,
rxe->ndev->dev_addr);
ndev->dev_addr);
dev_put(ndev);
spin_lock_init(&port->port_lock);
}

@ -167,12 +181,13 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
/* called by ifc layer to create new rxe device.
* The caller should allocate memory for rxe by calling ib_alloc_device.
*/
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
struct net_device *ndev)
{
rxe_init(rxe);
rxe_set_mtu(rxe, mtu);

return rxe_register_device(rxe, ibdev_name);
return rxe_register_device(rxe, ibdev_name, ndev);
}

static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)

@ -139,7 +139,8 @@ enum resp_states {

void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);

int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
struct net_device *ndev);

void rxe_rcv(struct sk_buff *skb);

@ -31,10 +31,19 @@
static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
struct net_device *ndev;
int ret;

ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return -ENODEV;

ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);

return dev_mc_add(rxe->ndev, ll_addr);
ret = dev_mc_add(ndev, ll_addr);
dev_put(ndev);

return ret;
}

/**
@ -47,10 +56,19 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
struct net_device *ndev;
int ret;

ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return -ENODEV;

ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);

return dev_mc_del(rxe->ndev, ll_addr);
ret = dev_mc_del(ndev, ll_addr);
dev_put(ndev);

return ret;
}

/**

@ -524,7 +524,16 @@ out:
*/
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
return rxe->ndev->name;
struct net_device *ndev;
char *ndev_name;

ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return NULL;
ndev_name = ndev->name;
dev_put(ndev);

return ndev_name;
}

int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
@ -536,10 +545,9 @@ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
if (!rxe)
return -ENOMEM;

rxe->ndev = ndev;
ib_mark_name_assigned_by_user(&rxe->ib_dev);

err = rxe_add(rxe, ndev->mtu, ibdev_name);
err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
if (err) {
ib_dealloc_device(&rxe->ib_dev);
return err;
@ -587,10 +595,18 @@ void rxe_port_down(struct rxe_dev *rxe)

void rxe_set_port_state(struct rxe_dev *rxe)
{
if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
struct net_device *ndev;

ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
if (!ndev)
return;

if (netif_running(ndev) && netif_carrier_ok(ndev))
rxe_port_up(rxe);
else
rxe_port_down(rxe);

dev_put(ndev);
}

static int rxe_notify(struct notifier_block *not_blk,

@ -41,6 +41,7 @@ static int rxe_query_port(struct ib_device *ibdev,
u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(ibdev);
struct net_device *ndev;
int err, ret;

if (port_num != 1) {
@ -49,6 +50,12 @@ static int rxe_query_port(struct ib_device *ibdev,
goto err_out;
}

ndev = rxe_ib_device_get_netdev(ibdev);
if (!ndev) {
err = -ENODEV;
goto err_out;
}

memcpy(attr, &rxe->port.attr, sizeof(*attr));

mutex_lock(&rxe->usdev_lock);
@ -57,13 +64,14 @@ static int rxe_query_port(struct ib_device *ibdev,

if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
else if (dev_get_flags(rxe->ndev) & IFF_UP)
else if (dev_get_flags(ndev) & IFF_UP)
attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

mutex_unlock(&rxe->usdev_lock);

dev_put(ndev);
return ret;

err_out:
@ -1425,9 +1433,16 @@ static const struct attribute_group rxe_attr_group = {
static int rxe_enable_driver(struct ib_device *ib_dev)
{
struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
struct net_device *ndev;

ndev = rxe_ib_device_get_netdev(ib_dev);
if (!ndev)
return -ENODEV;

rxe_set_port_state(rxe);
dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));

dev_put(ndev);
return 0;
}

@ -1495,7 +1510,8 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
struct net_device *ndev)
{
int err;
struct ib_device *dev = &rxe->ib_dev;
@ -1507,13 +1523,13 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
dev->num_comp_vectors = num_possible_cpus();
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
ndev->dev_addr);

dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);

ib_set_device_ops(dev, &rxe_dev_ops);
err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
if (err)
return err;

@ -370,6 +370,7 @@ struct rxe_port {
u32 qp_gsi_index;
};

#define RXE_PORT 1
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
@ -377,8 +378,6 @@ struct rxe_dev {
int max_inline_data;
struct mutex usdev_lock;

struct net_device *ndev;

struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
struct rxe_pool ah_pool;
@ -406,6 +405,11 @@ struct rxe_dev {
struct crypto_shash *tfm;
};

static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
{
return ib_device_get_netdev(dev, RXE_PORT);
}

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
atomic64_inc(&rxe->stats_counters[index]);
@ -471,6 +475,7 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
return to_rpd(mw->ibmw.pd);
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
struct net_device *ndev);

#endif /* RXE_VERBS_H */
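A recurring shape in the rxe hunks above (and the siw ones that follow): the cached rxe->ndev pointer is dropped in favor of per-use lookups through ib_device_get_netdev(), which returns a referenced net_device that every exit path must release with dev_put(), and which can return NULL once the device is unregistering. A toy userspace model of that get/use/put discipline (illustrative names, not the kernel API):

#include <stdio.h>

struct netdev {
	int refcnt;
	char name[16];
};

static struct netdev *registry;             /* set by "the core" */

static struct netdev *get_netdev(void)     /* like ib_device_get_netdev() */
{
	if (!registry)
		return NULL;                /* device may be unregistering */
	registry->refcnt++;
	return registry;
}

static void put_netdev(struct netdev *nd)   /* like dev_put() */
{
	if (nd)
		nd->refcnt--;
}

static int report_name(void)
{
	struct netdev *nd = get_netdev();

	if (!nd)
		return -1;                  /* -ENODEV in the drivers */
	printf("using %s\n", nd->name);
	put_netdev(nd);                     /* drop the ref on every path */
	return 0;
}

int main(void)
{
	struct netdev eth0 = { .refcnt = 0, .name = "eth0" };

	registry = &eth0;
	report_name();
	registry = NULL;                    /* simulate unregistration */
	printf("lookup after unregister: %d\n", report_name());
	printf("leaked refs: %d\n", eth0.refcnt);
	return 0;
}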
@ -46,6 +46,9 @@
*/
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4

/* There is always only a port 1 per siw device */
#define SIW_PORT 1

struct siw_dev_cap {
int max_qp;
int max_qp_wr;
@ -69,16 +72,12 @@ struct siw_pd {

struct siw_device {
struct ib_device base_dev;
struct net_device *netdev;
struct siw_dev_cap attrs;

u32 vendor_part_id;
int numa_node;
char raw_gid[ETH_ALEN];

/* physical port state (only one port per device) */
enum ib_port_state state;

spinlock_t lock;

struct xarray qp_xa;

@ -1759,6 +1759,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct socket *s;
struct siw_cep *cep = NULL;
struct net_device *ndev = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
int addr_family = id->local_addr.ss_family;
int rv = 0;
@ -1779,9 +1780,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);

/* For wildcard addr, limit binding to current device only */
if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
s->sk->sk_bound_dev_if = sdev->netdev->ifindex;

if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
ndev = ib_device_get_netdev(id->device, SIW_PORT);
if (ndev) {
s->sk->sk_bound_dev_if = ndev->ifindex;
} else {
rv = -ENODEV;
goto error;
}
}
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in));
} else {
@ -1797,9 +1804,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
}

/* For wildcard addr, limit binding to current device only */
if (ipv6_addr_any(&laddr->sin6_addr))
s->sk->sk_bound_dev_if = sdev->netdev->ifindex;

if (ipv6_addr_any(&laddr->sin6_addr)) {
ndev = ib_device_get_netdev(id->device, SIW_PORT);
if (ndev) {
s->sk->sk_bound_dev_if = ndev->ifindex;
} else {
rv = -ENODEV;
goto error;
}
}
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in6));
}
@ -1860,6 +1873,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
}
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
dev_put(ndev);

siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);

@ -1879,6 +1893,7 @@ error:
siw_cep_set_free_and_put(cep);
}
sock_release(s);
dev_put(ndev);

return rv;
}

@ -287,7 +287,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
return NULL;

base_dev = &sdev->base_dev;
sdev->netdev = netdev;

if (netdev->addr_len) {
memcpy(sdev->raw_gid, netdev->dev_addr,
@ -381,12 +380,10 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,

switch (event) {
case NETDEV_UP:
sdev->state = IB_PORT_ACTIVE;
siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
break;

case NETDEV_DOWN:
sdev->state = IB_PORT_DOWN;
siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
break;

@ -407,12 +404,8 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
break;
/*
* Todo: Below netdev events are currently not handled.
* All other events are not handled
*/
case NETDEV_CHANGEMTU:
case NETDEV_CHANGE:
break;

default:
break;
}
@ -442,12 +435,6 @@ static int siw_newlink(const char *basedev_name, struct net_device *netdev)
sdev = siw_device_create(netdev);
if (sdev) {
dev_dbg(&netdev->dev, "siw: new device\n");

if (netif_running(netdev) && netif_carrier_ok(netdev))
sdev->state = IB_PORT_ACTIVE;
else
sdev->state = IB_PORT_DOWN;

ib_mark_name_assigned_by_user(&sdev->base_dev);
rv = siw_device_register(sdev, basedev_name);
if (rv)

@ -171,21 +171,29 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr)
{
struct siw_device *sdev = to_siw_dev(base_dev);
struct net_device *ndev;
int rv;

memset(attr, 0, sizeof(*attr));

rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
&attr->active_width);
if (rv)
return rv;

ndev = ib_device_get_netdev(base_dev, SIW_PORT);
if (!ndev)
return -ENODEV;

attr->gid_tbl_len = 1;
attr->max_msg_sz = -1;
attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
attr->state = sdev->state;
/*
* All zero
*
@ -199,6 +207,7 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
* attr->subnet_timeout = 0;
* attr->init_type_repy = 0;
*/
dev_put(ndev);
return rv;
}

@ -505,21 +514,24 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
struct siw_qp *qp;
struct siw_device *sdev;
struct net_device *ndev;

if (base_qp && qp_attr && qp_init_attr) {
if (base_qp && qp_attr && qp_init_attr)
qp = to_siw_qp(base_qp);
sdev = to_siw_dev(base_qp->device);
} else {
else
return -EINVAL;
}

ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
if (!ndev)
return -ENODEV;

qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
qp_attr->cap.max_send_wr = qp->attrs.sq_size;
qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
qp_attr->max_rd_atomic = qp->attrs.irq_size;
qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

@ -534,6 +546,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,

qp_init_attr->cap = qp_attr->cap;

dev_put(ndev);
return 0;
}
@ -349,6 +349,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
struct ib_sge list;
u32 imm;
int err;

@ -401,7 +402,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
if (always_invalidate) {
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;

srv_mr = &srv_path->mrs[id->msg_id];
@ -1867,20 +1867,20 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
union cqhci_crypto_cap_entry cap;

if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
return qcom_ice_evict_key(msm_host->ice, slot);

/* Only AES-256-XTS has been tested so far. */
cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
return -EINVAL;

if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
cfg->crypto_key,
cfg->data_unit_size, slot);
else
return qcom_ice_evict_key(msm_host->ice, slot);
}

#else /* CONFIG_MMC_CRYPTO */
@ -1409,8 +1409,8 @@ static int anfc_parse_cs(struct arasan_nfc *nfc)
* case, the "not" chosen CS is assigned to nfc->spare_cs and selected
* whenever a GPIO CS must be asserted.
*/
if (nfc->cs_array && nfc->ncs > 2) {
if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
if (nfc->cs_array) {
if (nfc->ncs > 2 && !nfc->cs_array[0] && !nfc->cs_array[1]) {
dev_err(nfc->dev,
"Assign a single native CS when using GPIOs\n");
return -EINVAL;
@ -1478,8 +1478,15 @@ static int anfc_probe(struct platform_device *pdev)

static void anfc_remove(struct platform_device *pdev)
{
int i;
struct arasan_nfc *nfc = platform_get_drvdata(pdev);

for (i = 0; i < nfc->ncs; i++) {
if (nfc->cs_array[i]) {
gpiod_put(nfc->cs_array[i]);
}
}

anfc_chips_cleanup(nfc);
}

@ -380,10 +380,8 @@
user->delta = user->dmu + req->ecc.strength + 1;

gf_tables = atmel_pmecc_get_gf_tables(req);
if (IS_ERR(gf_tables)) {
kfree(user);
if (IS_ERR(gf_tables))
return ERR_CAST(gf_tables);
}

user->gf_tables = gf_tables;

@ -1098,7 +1098,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
(i == 0) && (ip->firstUnit > 0)) {
parts[0].name = " DiskOnChip IPL / Media Header partition";
parts[0].offset = 0;
parts[0].size = mtd->erasesize * ip->firstUnit;
parts[0].size = (uint64_t)mtd->erasesize * ip->firstUnit;
numparts = 1;
}

@ -254,6 +254,10 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)

/**
* omap_nand_data_in_pref - NAND data in using prefetch engine
* @chip: NAND chip
* @buf: output buffer where NAND data is placed into
* @len: length of transfer
* @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
@ -297,6 +301,10 @@ static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,

/**
* omap_nand_data_out_pref - NAND data out using Write Posting engine
* @chip: NAND chip
* @buf: input buffer that is sent to NAND
* @len: length of transfer
* @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_out_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
@ -440,6 +448,10 @@ out_copy:

/**
* omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
* @chip: NAND chip
* @buf: output buffer where NAND data is placed into
* @len: length of transfer
* @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
@ -460,6 +472,10 @@ static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,

/**
* omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
* @chip: NAND chip
* @buf: input buffer that is sent to NAND
* @len: length of transfer
* @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
* Copyright (C) 2017-2019 Microchip Technology Inc.
* Copyright (C) 2017-2024 Microchip Technology Inc.
*/

#include <linux/kernel.h>
@ -983,26 +983,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
u8 value;
u8 data;
u8 data, mult, value;
u32 max_val;
int ret;

value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
#define MAX_TIMER_VAL ((1 << 8) - 1)

ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
if (ret < 0)
return ret;
/* The aging timer comprises a 3-bit multiplier and an 8-bit second
* value. Either of them cannot be zero. The maximum timer is then
* 7 * 255 = 1785 seconds.
*/
if (!secs)
secs = 1;

data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
/* Return error if too large. */
else if (secs > 7 * MAX_TIMER_VAL)
return -EINVAL;

ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
if (ret < 0)
return ret;

/* Check whether there is need to update the multiplier. */
mult = FIELD_GET(SW_AGE_CNT_M, value);
max_val = MAX_TIMER_VAL;
if (mult > 0) {
/* Try to use the same multiplier already in the register as
* the hardware default uses multiplier 4 and 75 seconds for
* 300 seconds.
*/
max_val = DIV_ROUND_UP(secs, mult);
if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
max_val = MAX_TIMER_VAL;
}

data = DIV_ROUND_UP(secs, max_val);
if (mult != data) {
value &= ~SW_AGE_CNT_M;
value |= FIELD_PREP(SW_AGE_CNT_M, data);
ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
if (ret < 0)
return ret;
}

return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
value = DIV_ROUND_UP(secs, data);
return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}

void ksz9477_port_queue_split(struct ksz_device *dev, int port)
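The multiplier selection above can be followed in isolation: pick the smallest 3-bit multiplier that lets the 8-bit seconds register reach the requested time, preferring the multiplier already programmed when it divides the request exactly. A quick standalone rendering (helper names local to this example):

#include <stdio.h>

#define MAX_TIMER_VAL ((1u << 8) - 1)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void split_age(unsigned int secs, unsigned int cur_mult,
		      unsigned int *mult, unsigned int *val)
{
	unsigned int max_val = MAX_TIMER_VAL;

	/* Prefer the multiplier already programmed when it divides
	 * the requested time exactly and the quotient fits. */
	if (cur_mult > 0) {
		max_val = DIV_ROUND_UP(secs, cur_mult);
		if (max_val > MAX_TIMER_VAL || max_val * cur_mult != secs)
			max_val = MAX_TIMER_VAL;
	}
	*mult = DIV_ROUND_UP(secs, max_val);
	*val = DIV_ROUND_UP(secs, *mult);
}

int main(void)
{
	unsigned int mult, val;

	split_age(300, 4, &mult, &val);   /* hardware default: 4 * 75 */
	printf("300s  -> mult=%u val=%u\n", mult, val);
	split_age(1785, 0, &mult, &val);  /* maximum: 7 * 255 */
	printf("1785s -> mult=%u val=%u\n", mult, val);
	return 0;
}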
@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
* Copyright (C) 2017-2018 Microchip Technology Inc.
* Copyright (C) 2017-2024 Microchip Technology Inc.
*/

#ifndef __KSZ9477_REGS_H
@ -165,8 +165,6 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_AGE_CNT_S 3
#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Microchip LAN937X switch driver main logic
* Copyright (C) 2019-2022 Microchip Technology Inc.
* Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@ -461,10 +461,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)

int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
u32 value;
u8 data, mult, value8;
bool in_msec = false;
u32 max_val, value;
u32 secs = msecs;
int ret;

#define MAX_TIMER_VAL ((1 << 20) - 1)

/* The aging timer comprises a 3-bit multiplier and a 20-bit second
* value. Either of them cannot be zero. The maximum timer is then
* 7 * 1048575 = 7340025 seconds. As this value is too large for
* practical use it can be interpreted as microseconds, making the
* maximum timer 7340 seconds with finer control. This allows for
* maximum 122 minutes compared to 29 minutes in KSZ9477 switch.
*/
if (msecs % 1000)
in_msec = true;
else
secs /= 1000;
if (!secs)
secs = 1;

/* Return error if too large. */
else if (secs > 7 * MAX_TIMER_VAL)
return -EINVAL;

/* Configure how to interpret the number value. */
ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
if (ret < 0)
return ret;

ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
if (ret < 0)
return ret;

/* Check whether there is need to update the multiplier. */
mult = FIELD_GET(SW_AGE_CNT_M, value8);
max_val = MAX_TIMER_VAL;
if (mult > 0) {
/* Try to use the same multiplier already in the register as
* the hardware default uses multiplier 4 and 75 seconds for
* 300 seconds.
*/
max_val = DIV_ROUND_UP(secs, mult);
if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
max_val = MAX_TIMER_VAL;
}

data = DIV_ROUND_UP(secs, max_val);
if (mult != data) {
value8 &= ~SW_AGE_CNT_M;
value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
if (ret < 0)
return ret;
}

secs = DIV_ROUND_UP(secs, data);

value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
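The limits quoted in the comment above check out: 7 * ((1 << 20) - 1) = 7340025 counts, which at 1000 counts per second (the comment's arithmetic for the finer-grained mode) is 7340 seconds, about 122 minutes, versus 7 * 255 = 1785 seconds (about 29 minutes) on KSZ9477. A throwaway verification (all values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long max20 = (1ul << 20) - 1;   /* 1048575 */
	unsigned long units = 7 * max20;         /* 7340025 */

	printf("budget: %lu counts\n", units);
	printf("finer-grained mode: %lu s (~%lu min)\n",
	       units / 1000, units / 1000 / 60);

	/* KSZ9477 for comparison: 3-bit multiplier, 8-bit seconds */
	printf("ksz9477 max: %u s (~%u min)\n", 7 * 255, 7 * 255 / 60);
	return 0;
}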
@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip LAN937X switch register definitions
* Copyright (C) 2019-2021 Microchip Technology Inc.
* Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#ifndef __LAN937X_REG_H
#define __LAN937X_REG_H
@ -56,8 +56,7 @@

#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M 0x7
#define SW_AGE_CNT_S 3
#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_RESV_MCAST_ENABLE BIT(2)

#define REG_SW_LUE_CTRL_1 0x0311
@ -70,6 +69,10 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)

#define REG_SW_LUE_CTRL_2 0x0312

#define SW_AGE_CNT_IN_MICROSEC BIT(7)

#define REG_SW_AGE_PERIOD__1 0x0313
#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
@ -1933,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;

clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
netdev_err(dev, "could not enable priv clock\n");
return ret;
}

/* Reset UniMAC */
umac_reset(priv);
@ -2591,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}

clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(&pdev->dev, "could not enable priv clock\n");
goto err_deregister_netdev;
}

priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@ -2605,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)

return 0;

err_deregister_netdev:
unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@ -2774,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;

clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret) {
netdev_err(dev, "could not enable priv clock\n");
return ret;
}

if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
@ -1140,6 +1140,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
if (block->rx) {
|
||||
work_done = gve_rx_poll(block, budget);
|
||||
|
||||
/* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of
|
||||
* TX and RX work done.
|
||||
*/
|
||||
if (priv->xdp_prog)
|
||||
work_done = max_t(int, work_done,
|
||||
gve_xsk_tx_poll(block, budget));
|
||||
|
||||
reschedule |= work_done == budget;
|
||||
}
|
||||
|
||||

@@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv)
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
				      struct gve_tx_alloc_rings_cfg *cfg)
{
	int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;

	cfg->qcfg = &priv->tx_cfg;
	cfg->raw_addressing = !gve_is_qpl(priv);
	cfg->ring_size = priv->tx_desc_cnt;
	cfg->start_idx = 0;
	cfg->num_rings = gve_num_tx_queues(priv);
	cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
	cfg->tx = priv->tx;
}

@@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
	if (err)
		return err;

	/* If XDP prog is not installed, return */
	if (!priv->xdp_prog)
	/* If XDP prog is not installed or interface is down, return. */
	if (!priv->xdp_prog || !netif_running(dev))
		return 0;

	rx = &priv->rx[qid];

@@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
	if (qid >= priv->rx_cfg.num_queues)
		return -EINVAL;

	/* If XDP prog is not installed, unmap DMA and return */
	if (!priv->xdp_prog)
	/* If XDP prog is not installed or interface is down, unmap DMA and
	 * return.
	 */
	if (!priv->xdp_prog || !netif_running(dev))
		goto done;

	tx_qid = gve_xdp_tx_queue_id(priv, qid);
	if (!netif_running(dev)) {
		priv->rx[qid].xsk_pool = NULL;
		xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
		priv->tx[tx_qid].xsk_pool = NULL;
		goto done;
	}

	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
	napi_disable(napi_rx); /* make sure current rx poll is done */

	tx_qid = gve_xdp_tx_queue_id(priv, qid);
	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
	napi_disable(napi_tx); /* make sure current tx poll is done */

@@ -1709,16 +1714,15 @@ done:
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct gve_priv *priv = netdev_priv(dev);
	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
	struct napi_struct *napi;

	if (!gve_get_napi_enabled(priv))
		return -ENETDOWN;

	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
		return -EINVAL;

	if (flags & XDP_WAKEUP_TX) {
		struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
		struct napi_struct *napi =
			&priv->ntfy_blocks[tx->ntfy_id].napi;

	napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
	if (!napi_if_scheduled_mark_missed(napi)) {
		/* Call local_bh_enable to trigger SoftIRQ processing */
		local_bh_disable();

@@ -1726,9 +1730,6 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
		local_bh_enable();
	}

		tx->xdp_xsk_wakeup++;
	}

	return 0;
}

@@ -1837,6 +1838,7 @@ int gve_adjust_queues(struct gve_priv *priv,
{
	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
	int num_xdp_queues;
	int err;

	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);

@@ -1847,6 +1849,10 @@ int gve_adjust_queues(struct gve_priv *priv,
	rx_alloc_cfg.qcfg = &new_rx_config;
	tx_alloc_cfg.num_rings = new_tx_config.num_queues;

	/* Add dedicated XDP TX queues if enabled. */
	num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
	tx_alloc_cfg.num_rings += num_xdp_queues;

	if (netif_running(priv->dev)) {
		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
		return err;
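Both gve hunks above derive the TX ring count the same way: the configured TX queues, plus one dedicated XDP TX queue per RX queue when an XDP program is attached. A sketch of just that arithmetic (function and parameter names are hypothetical):

#include <stdio.h>
#include <stdbool.h>

/* One XDP TX queue per RX queue, but only when a program is loaded. */
static int tx_ring_count(int tx_queues, int rx_queues, bool xdp_prog)
{
	int num_xdp_queues = xdp_prog ? rx_queues : 0;

	return tx_queues + num_xdp_queues;
}

int main(void)
{
	printf("no XDP:   %d rings\n", tx_ring_count(8, 8, false)); /* 8  */
	printf("with XDP: %d rings\n", tx_ring_count(8, 8, true));  /* 16 */
	return 0;
}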

@@ -1899,6 +1905,9 @@ static void gve_turndown(struct gve_priv *priv)

	gve_clear_napi_enabled(priv);
	gve_clear_report_stats(priv);

	/* Make sure that all traffic is finished processing. */
	synchronize_net();
}

static void gve_turnup(struct gve_priv *priv)

@@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
		return;

	gve_remove_napi(priv, ntfy_idx);
	if (tx->q_num < priv->tx_cfg.num_queues)
		gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
	else
		gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
	netdev_tx_reset_queue(tx->netdev_txq);
	gve_tx_remove_from_block(priv, idx);
}

@@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	struct gve_tx_ring *tx;
	int i, err = 0, qid;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
		return -EINVAL;

	if (!gve_get_napi_enabled(priv))
		return -ENETDOWN;

	qid = gve_xdp_tx_queue_id(priv,
				  smp_processor_id() % priv->num_xdp_queues);

@@ -975,33 +981,41 @@ out:
	return sent;
}

int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
{
	struct gve_rx_ring *rx = rx_block->rx;
	struct gve_priv *priv = rx->gve;
	struct gve_tx_ring *tx;
	int sent = 0;

	tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
	if (tx->xsk_pool) {
		sent = gve_xsk_tx(priv, tx, budget);

		u64_stats_update_begin(&tx->statss);
		tx->xdp_xsk_sent += sent;
		u64_stats_update_end(&tx->statss);
		if (xsk_uses_need_wakeup(tx->xsk_pool))
			xsk_set_tx_need_wakeup(tx->xsk_pool);
	}

	return sent;
}

bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	bool repoll;
	u32 to_do;

	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_xdp_done(priv, tx, to_do);
	repoll = nic_done != tx->done;

	if (tx->xsk_pool) {
		int sent = gve_xsk_tx(priv, tx, budget);

		u64_stats_update_begin(&tx->statss);
		tx->xdp_xsk_sent += sent;
		u64_stats_update_end(&tx->statss);
		repoll |= (sent == budget);
		if (xsk_uses_need_wakeup(tx->xsk_pool))
			xsk_set_tx_need_wakeup(tx->xsk_pool);
	}

	/* If we still have work we want to repoll */
	return repoll;
	return nic_done != tx->done;
}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
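gve_xdp_poll() above clamps each cleanup pass to min(outstanding completions, budget) and reports whether work remains, while the XSK transmit half of its old body moves into the new gve_xsk_tx_poll(). A compact model of the clamp-and-repoll loop (poll_once() and the counts are stand-ins for gve_clean_xdp_done() and the event counter):

#include <stdio.h>

#define min_u32(a, b)	((a) < (b) ? (a) : (b))

/* Drain up to `budget` completions; report whether work remains. */
static int poll_once(unsigned int *done, unsigned int nic_done,
		     unsigned int budget)
{
	unsigned int to_do = min_u32(nic_done - *done, budget);

	*done += to_do;			/* stands in for the cleanup call */
	return nic_done != *done;	/* repoll while behind the NIC */
}

int main(void)
{
	unsigned int done = 0, nic_done = 150, budget = 64;

	while (poll_once(&done, nic_done, budget))
		printf("repoll, done=%u\n", done);	/* 64, then 128 */
	printf("caught up, done=%u\n", done);		/* 150 */
	return 0;
}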

@@ -2704,9 +2704,15 @@ static struct platform_device *port_platdev[3];

static void mv643xx_eth_shared_of_remove(void)
{
	struct mv643xx_eth_platform_data *pd;
	int n;

	for (n = 0; n < 3; n++) {
		if (!port_platdev[n])
			continue;
		pd = dev_get_platdata(&port_platdev[n]->dev);
		if (pd)
			of_node_put(pd->phy_node);
		platform_device_del(port_platdev[n]);
		port_platdev[n] = NULL;
	}

@@ -2769,8 +2775,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
	}

	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
	if (!ppdev)
		return -ENOMEM;
	if (!ppdev) {
		ret = -ENOMEM;
		goto put_err;
	}
	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	ppdev->dev.of_node = pnp;

@@ -2792,6 +2800,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,

port_err:
	platform_device_put(ppdev);
put_err:
	of_node_put(ppd.phy_node);
	return ret;
}
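The mv643xx hunks plug two reference leaks: the remove path now drops the phy_node reference stashed in each port's platform data, and the error ladder gains a put_err label so ppd.phy_node is released on every failure path. The underlying acquire-in-order/release-in-reverse goto shape, sketched with fake refcounted resources (acquire()/release() are stand-ins for of_node_get()/of_node_put() and platform_device_alloc()/_put()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what, int fail)
{
	if (fail)
		return NULL;
	printf("acquired %s\n", what);
	return malloc(1);
}

static void release(const char *what, void *p)
{
	printf("released %s\n", what);
	free(p);
}

/* Mirrors the fixed add_port flow: the node taken first is released
 * on every failure path via the new label. */
static int add_port(int fail_pdev)
{
	void *node, *pdev;
	int ret = 0;

	node = acquire("phy_node", 0);
	pdev = acquire("pdev", fail_pdev);
	if (!pdev) {
		ret = -ENOMEM;
		goto put_err;		/* analogous to the new put_err label */
	}
	release("pdev", pdev);		/* demo only: tidy the success path */
put_err:
	release("phy_node", node);
	return ret;
}

int main(void)
{
	printf("ok:   %d\n", add_port(0));
	printf("fail: %d\n", add_port(1));
	return 0;
}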

@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */

@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	struct mlx5_macsec_rule_attrs rule_attrs;
	union mlx5_macsec_rule *macsec_rule;

	if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
		return 0;

	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
	rule_attrs.sci = sa->sci;
	rule_attrs.assoc_num = sa->assoc_num;

@@ -6542,8 +6542,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev)

	mlx5_core_uplink_netdev_set(mdev, NULL);
	mlx5e_dcbnl_delete_app(priv);
	/* When unload driver, the netdev is in registered state
	 * if it's from legacy mode. If from switchdev mode, it
	 * is already unregistered before changing to NIC profile.
	 */
	if (priv->netdev->reg_state == NETREG_REGISTERED) {
		unregister_netdev(priv->netdev);
		_mlx5e_suspend(adev, false);
	} else {
		struct mlx5_core_dev *pos;
		int i;

		if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
			mlx5_sd_for_each_dev(i, mdev, pos)
				mlx5e_destroy_mdev_resources(pos);
		else
			_mlx5e_suspend(adev, true);
	}
	/* Avoid cleanup if profile rollback failed. */
	if (priv->profile)
		priv->profile->cleanup(priv);

@@ -1509,6 +1509,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)

	priv = netdev_priv(netdev);

	/* This bit is set when using devlink to change eswitch mode from
	 * switchdev to legacy. As need to keep uplink netdev ifindex, we
	 * detach uplink representor profile and attach NIC profile only.
	 * The netdev will be unregistered later when unload NIC auxiliary
	 * driver for this case.
	 * We explicitly block devlink eswitch mode change if any IPSec rules
	 * offloaded, but can't block other cases, such as driver unload
	 * and devlink reload. We have to unregister netdev before profile
	 * change for those cases. This is to avoid resource leak because
	 * the offloaded rules don't have the chance to be unoffloaded before
	 * cleanup which is triggered by detach uplink representor profile.
	 */
	if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
		unregister_netdev(netdev);

	mlx5e_netdev_attach_nic_profile(priv);
}

@@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
	unsigned long i;
	int err;

	xa_for_each(&esw->offloads.vport_reps, i, rep) {
		rpriv = rep->rep_data[REP_ETH].priv;
		if (!rpriv || !rpriv->netdev)
	mlx5_esw_for_each_rep(esw, i, rep) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		rpriv = rep->rep_data[REP_ETH].priv;
		rhashtable_walk_enter(&rpriv->tc_ht, &iter);
		rhashtable_walk_start(&iter);
		while ((flow = rhashtable_walk_next(&iter)) != NULL) {

@@ -714,6 +714,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
	 MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
	 (last) - 1)

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

@@ -53,9 +53,6 @@
#include "lag/lag.h"
#include "en/tc/post_meter.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */

@@ -3780,6 +3777,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
	esw->eswitch_operation_in_progress = true;
	up_write(&esw->mode_lock);

	if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
	mlx5_eswitch_disable_locked(esw);
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		if (mlx5_devlink_trap_get_num_active(esw->dev)) {

@@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
	int inlen, err, eqn;
	void *cqc, *in;
	__be64 *pas;
	int vector;
	u32 i;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);

@@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
	if (!in)
		goto err_cqwq;

	vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
	err = mlx5_comp_eqn_get(mdev, vector, &eqn);
	err = mlx5_comp_eqn_get(mdev, 0, &eqn);
	if (err) {
		kvfree(in);
		goto err_cqwq;

@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
			    0);
			    0, 0, tun->net, parms.link, tun->fwmark, 0, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))

@@ -64,7 +64,7 @@ static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
	u32 i, j;

	*(data++) = start;
	*(data++) = end - 1;
	*(data++) = end;

	/* FBNIC_RPC_TCAM_ACT */
	for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {

@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
			     void *cb_priv);

static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
	.key_len	= offsetof(struct efx_tc_ct_zone, linkage),
	.key_len	= sizeof_field(struct efx_tc_ct_zone, zone),
	.key_offset	= 0,
	.head_offset	= offsetof(struct efx_tc_ct_zone, linkage),
};
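The sfc one-liner above is subtle: offsetof(struct efx_tc_ct_zone, linkage) makes the hash key span every byte before the linkage member, so unrelated fields and padding leak into the key; sizeof_field(..., zone) keys on the zone alone. A compilable illustration with a hypothetical struct layout (not the real efx definition):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's sizeof_field(). */
#define sizeof_field(T, m)	(sizeof(((T *)0)->m))

struct ct_zone {
	uint16_t zone;			/* intended hash key */
	void *extra_state;		/* NOT part of the key */
	struct { void *next; } linkage;	/* hashtable linkage */
};

int main(void)
{
	/* Old: key spans zone + padding + extra_state -- far too long. */
	printf("offsetof key_len     = %zu\n",
	       offsetof(struct ct_zone, linkage));
	/* New: key is exactly the zone field. */
	printf("sizeof_field key_len = %zu\n",
	       sizeof_field(struct ct_zone, zone));
	return 0;
}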

@@ -405,22 +405,6 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
	return -ENODEV;
}

/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
static void stmmac_remove_config_dt(struct platform_device *pdev,
				    struct plat_stmmacenet_data *plat)
{
	clk_disable_unprepare(plat->stmmac_clk);
	clk_disable_unprepare(plat->pclk);
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}

/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure

@@ -490,8 +474,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

	rc = stmmac_mdio_setup(plat, np, &pdev->dev);
	if (rc)
		return ERR_PTR(rc);
	if (rc) {
		ret = ERR_PTR(rc);
		goto error_put_phy;
	}

	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

@@ -581,8 +567,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
				       GFP_KERNEL);
		if (!dma_cfg) {
			stmmac_remove_config_dt(pdev, plat);
			return ERR_PTR(-ENOMEM);
			ret = ERR_PTR(-ENOMEM);
			goto error_put_mdio;
		}
		plat->dma_cfg = dma_cfg;

@@ -610,8 +596,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)

	rc = stmmac_mtl_setup(pdev, plat);
	if (rc) {
		stmmac_remove_config_dt(pdev, plat);
		return ERR_PTR(rc);
		ret = ERR_PTR(rc);
		goto error_put_mdio;
	}

	/* clock setup */

@@ -663,6 +649,10 @@ error_hw_init:
	clk_disable_unprepare(plat->pclk);
error_pclk_get:
	clk_disable_unprepare(plat->stmmac_clk);
error_put_mdio:
	of_node_put(plat->mdio_node);
error_put_phy:
	of_node_put(plat->phy_node);

	return ret;
}

@@ -671,16 +661,17 @@ static void devm_stmmac_remove_config_dt(void *data)
{
	struct plat_stmmacenet_data *plat = data;

	/* Platform data argument is unused */
	stmmac_remove_config_dt(NULL, plat);
	clk_disable_unprepare(plat->stmmac_clk);
	clk_disable_unprepare(plat->pclk);
	of_node_put(plat->mdio_node);
	of_node_put(plat->phy_node);
}

/**
 * devm_stmmac_probe_config_dt
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description: Devres variant of stmmac_probe_config_dt(). Does not require
 * the user to call stmmac_remove_config_dt() at driver detach.
 * Description: Devres variant of stmmac_probe_config_dt().
 */
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
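With stmmac_remove_config_dt() gone, the probe error ladder and the devres callback above both spell out the same rule: resources are released in the reverse of their acquisition order, and each new label (error_put_mdio, error_put_phy) unwinds exactly what was obtained before the failure. A runnable sketch of that ladder shape (get()/put() and probe() are stand-ins, not stmmac code):

#include <errno.h>
#include <stdio.h>

static int get(const char *what, int fail)
{
	if (fail)
		return -ENODEV;
	printf("got %s\n", what);
	return 0;
}

static void put(const char *what)
{
	printf("put %s\n", what);
}

/* Each label releases exactly what was acquired before the failure,
 * in reverse order. On success everything stays held, as in probe. */
static int probe(int fail_at)
{
	int ret;

	ret = get("phy_node", fail_at == 1);
	if (ret)
		return ret;
	ret = get("mdio_node", fail_at == 2);
	if (ret)
		goto err_put_phy;
	ret = get("clk", fail_at == 3);
	if (ret)
		goto err_put_mdio;

	return 0;

err_put_mdio:
	put("mdio_node");
err_put_phy:
	put("phy_node");
	return ret;
}

int main(void)
{
	probe(3);	/* fails getting clk: puts mdio_node, then phy_node */
	return 0;
}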

@@ -3551,7 +3551,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
	init_completion(&common->tdown_complete);
	common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
	common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
	common->pf_p0_rx_ptype_rrobin = false;
	common->pf_p0_rx_ptype_rrobin = true;
	common->default_vlan = 1;

	common->ports = devm_kcalloc(dev, common->port_num,

@@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));

		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(cmp), 0);
	}

	/* enable reset counter on CMP0 event */

@@ -780,6 +783,11 @@ int icss_iep_exit(struct icss_iep *iep)
	}
	icss_iep_disable(iep);

	if (iep->pps_enabled)
		icss_iep_pps_enable(iep, false);
	else if (iep->perout_enabled)
		icss_iep_perout_enable(iep, NULL, false);

	return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
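icss_iep_enable_shadow_mode() now also clears each comparator's enable bit with regmap_update_bits(map, reg, mask, val), a read-modify-write in which only the bits under mask change. A small model of that helper's semantics (update_bits() mirrors the behavior; the CMP_EN bit layout is hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Model of regmap_update_bits(): only bits under `mask` change. */
static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

#define CMP_EN(cmp)	(1u << ((cmp) + 1))	/* hypothetical bit layout */

int main(void)
{
	uint32_t cfg = 0xff;

	update_bits(&cfg, CMP_EN(3), 0);	/* disable comparator 3 only */
	printf("cfg = 0x%02x\n", (unsigned int)cfg); /* 0xef: rest intact */
	return 0;
}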

@@ -855,31 +855,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);

void prueth_emac_stop(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice;

	switch (emac->port_id) {
	case PRUETH_PORT_MII0:
		slice = ICSS_SLICE0;
		break;
	case PRUETH_PORT_MII1:
		slice = ICSS_SLICE1;
		break;
	default:
		netdev_err(emac->ndev, "invalid port\n");
		return;
	}

	emac->fw_running = 0;
	if (!emac->is_sr1)
		rproc_shutdown(prueth->txpru[slice]);
	rproc_shutdown(prueth->rtu[slice]);
	rproc_shutdown(prueth->pru[slice]);
}
EXPORT_SYMBOL_GPL(prueth_emac_stop);

void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
	int i;
(Some files were not shown because too many files have changed in this diff.)