Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2024-12-28 16:53:49 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.12-rc4).

Conflicts:
  107a034d5c ("net/mlx5: qos: Store rate groups in a qos domain")
  1da9cfd6c4 ("net/mlx5: Unregister notifier on eswitch init failure")

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

commit 91afa49a3e

.mailmap (3 changes)
@@ -210,6 +210,9 @@ Felix Moeller <felix@derklecks.de>
 Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org>
 Filipe Lautert <filipe@icewall.org>
 Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
+Fiona Behrens <me@kloenk.dev>
+Fiona Behrens <me@kloenk.dev> <me@kloenk.de>
+Fiona Behrens <me@kloenk.dev> <fin@nyantec.com>
 Franck Bui-Huu <vagabon.xyz@gmail.com>
 Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
 Frank Rowand <frowand.list@gmail.com> <frank.rowand@sony.com>
Documentation/devicetree/bindings/display/elgin,jg10309-01.yaml (new file, path per the schema $id)
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/elgin,jg10309-01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Elgin JG10309-01 SPI-controlled display
+
+maintainers:
+  - Fabio Estevam <festevam@gmail.com>
+
+description: |
+  The Elgin JG10309-01 SPI-controlled display is used on the RV1108-Elgin-r1
+  board and is a custom display.
+
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+  compatible:
+    const: elgin,jg10309-01
+
+  reg:
+    maxItems: 1
+
+  spi-max-frequency:
+    maximum: 24000000
+
+  spi-cpha: true
+
+  spi-cpol: true
+
+required:
+  - compatible
+  - reg
+  - spi-cpha
+  - spi-cpol
+
+additionalProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        display@0 {
+            compatible = "elgin,jg10309-01";
+            reg = <0>;
+            spi-max-frequency = <24000000>;
+            spi-cpha;
+            spi-cpol;
+        };
+    };
@@ -82,9 +82,6 @@ allOf:
             enum:
               - fsl,ls1043a-extirq
               - fsl,ls1046a-extirq
-              - fsl,ls1088a-extirq
-              - fsl,ls2080a-extirq
-              - fsl,lx2160a-extirq
    then:
      properties:
        interrupt-map:
@@ -95,6 +92,29 @@ allOf:
            - const: 0xf
            - const: 0

+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - fsl,ls1088a-extirq
+              - fsl,ls2080a-extirq
+              - fsl,lx2160a-extirq
+    # The driver (drivers/irqchip/irq-ls-extirq.c) does not use the standard DT
+    # helper to parse interrupt-map, so it does not take '#address-cells' of the
+    # parent interrupt controller (such as the GIC) into account.
+    #
+    # When the dt-binding validates interrupt-map, the item data matrix is
+    # therefore split at the wrong position. Drop the interrupt-map restriction
+    # for these compatibles because it is always wrong.
+    then:
+      properties:
+        interrupt-map-mask:
+          items:
+            - const: 0xf
+            - const: 0
+
 additionalProperties: false

 examples:
@@ -113,7 +113,7 @@ properties:

   msi-parent:
     deprecated: true
-    $ref: /schemas/types.yaml#/definitions/phandle
+    maxItems: 1
     description:
       Describes the MSI controller node handling message
       interrupts for the MC. When there is no translation
@@ -26,6 +26,7 @@ properties:
       - brcm,asp-v2.1-mdio
       - brcm,asp-v2.2-mdio
       - brcm,unimac-mdio
+      - brcm,bcm6846-mdio

   reg:
     minItems: 1
@@ -101,8 +101,6 @@ properties:
           # DMARD10: 3-axis Accelerometer
           - domintech,dmard10
-          # Elgin SPI-controlled LCD
-          - elgin,jg10309-01
           # MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
           - fsl,mma7660
           # MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
MAINTAINERS
@@ -12944,7 +12944,6 @@ LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
 M:	Viresh Kumar <vireshk@kernel.org>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 F:	drivers/ata/pata_arasan_cf.c
 F:	include/linux/pata_arasan_cf_data.h

@@ -12958,17 +12957,14 @@ LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
 M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 F:	drivers/ata/pata_ftide010.c
 F:	drivers/ata/sata_gemini.c
 F:	drivers/ata/sata_gemini.h

 LIBATA SATA AHCI PLATFORM devices support
 M:	Hans de Goede <hdegoede@redhat.com>
 M:	Jens Axboe <axboe@kernel.dk>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 F:	drivers/ata/ahci_platform.c
 F:	drivers/ata/libahci_platform.c
 F:	include/linux/ahci_platform.h
@@ -12977,7 +12973,6 @@ LIBATA SATA AHCI SYNOPSYS DWC CONTROLLER DRIVER
 M:	Serge Semin <fancer.lancer@gmail.com>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git
 F:	Documentation/devicetree/bindings/ata/baikal,bt1-ahci.yaml
 F:	Documentation/devicetree/bindings/ata/snps,dwc-ahci.yaml
 F:	drivers/ata/ahci_dwc.c
@@ -12986,7 +12981,6 @@ LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
 M:	Mikael Pettersson <mikpelinux@gmail.com>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 F:	drivers/ata/sata_promise.*

 LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
@@ -16092,6 +16086,7 @@ F:	include/uapi/linux/net_dropmon.h
 F:	net/core/drop_monitor.c

 NETWORKING DRIVERS
+M:	Andrew Lunn <andrew+netdev@lunn.ch>
 M:	"David S. Miller" <davem@davemloft.net>
 M:	Eric Dumazet <edumazet@google.com>
 M:	Jakub Kicinski <kuba@kernel.org>
Makefile (2 changes)
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -494,6 +494,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
 	bctr	/* jump into table */
 152:
 	mfdar	r11
+	mtdar	r10
 	mtctr	r11	/* restore ctr reg from DAR */
 	mfspr	r11, SPRN_SPRG_THREAD
 	stw	r10, DAR(r11)
@@ -1032,6 +1032,10 @@ static u64 xen_do_read_msr(unsigned int msr, int *err)
 	switch (msr) {
 	case MSR_IA32_APICBASE:
 		val &= ~X2APIC_ENABLE;
+		if (smp_processor_id() == 0)
+			val |= MSR_IA32_APICBASE_BSP;
+		else
+			val &= ~MSR_IA32_APICBASE_BSP;
 		break;
 	}
 	return val;
@@ -373,7 +373,7 @@ void crypto_alg_tested(const char *name, int err)
 		q->cra_flags |= CRYPTO_ALG_DEAD;
 		alg = test->adult;

-		if (list_empty(&alg->cra_list))
+		if (crypto_is_dead(alg))
 			goto complete;

 		if (err == -ECANCELED)
@@ -1940,7 +1940,7 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
 	atfm = crypto_alloc_ahash(driver, type, mask);
 	if (IS_ERR(atfm)) {
 		if (PTR_ERR(atfm) == -ENOENT)
-			return -ENOENT;
+			return 0;
 		pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
 		       driver, PTR_ERR(atfm));
 		return PTR_ERR(atfm);
@@ -2706,7 +2706,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
 	tfm = crypto_alloc_aead(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		if (PTR_ERR(tfm) == -ENOENT)
-			return -ENOENT;
+			return 0;
 		pr_err("alg: aead: failed to allocate transform for %s: %ld\n",
 		       driver, PTR_ERR(tfm));
 		return PTR_ERR(tfm);
@@ -3285,7 +3285,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
 	tfm = crypto_alloc_skcipher(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		if (PTR_ERR(tfm) == -ENOENT)
-			return -ENOENT;
+			return 0;
 		pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n",
 		       driver, PTR_ERR(tfm));
 		return PTR_ERR(tfm);
@@ -3700,7 +3700,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
 	tfm = crypto_alloc_cipher(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		if (PTR_ERR(tfm) == -ENOENT)
-			return -ENOENT;
+			return 0;
 		printk(KERN_ERR "alg: cipher: Failed to load transform for "
 		       "%s: %ld\n", driver, PTR_ERR(tfm));
 		return PTR_ERR(tfm);
@@ -3726,7 +3726,7 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
 		acomp = crypto_alloc_acomp(driver, type, mask);
 		if (IS_ERR(acomp)) {
 			if (PTR_ERR(acomp) == -ENOENT)
-				return -ENOENT;
+				return 0;
 			pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
 			       driver, PTR_ERR(acomp));
 			return PTR_ERR(acomp);
@@ -3740,7 +3740,7 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
 		comp = crypto_alloc_comp(driver, type, mask);
 		if (IS_ERR(comp)) {
 			if (PTR_ERR(comp) == -ENOENT)
-				return -ENOENT;
+				return 0;
 			pr_err("alg: comp: Failed to load transform for %s: %ld\n",
 			       driver, PTR_ERR(comp));
 			return PTR_ERR(comp);
@@ -3818,7 +3818,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
 	rng = crypto_alloc_rng(driver, type, mask);
 	if (IS_ERR(rng)) {
 		if (PTR_ERR(rng) == -ENOENT)
-			return -ENOENT;
+			return 0;
 		printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
 		       "%ld\n", driver, PTR_ERR(rng));
 		return PTR_ERR(rng);
@@ -3846,12 +3846,11 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr,

 	drng = crypto_alloc_rng(driver, type, mask);
 	if (IS_ERR(drng)) {
+		kfree_sensitive(buf);
 		if (PTR_ERR(drng) == -ENOENT)
-			goto out_no_rng;
+			return 0;
 		printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
 		       "%s\n", driver);
-out_no_rng:
-		kfree_sensitive(buf);
 		return PTR_ERR(drng);
 	}

@@ -4095,7 +4094,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
 	tfm = crypto_alloc_kpp(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		if (PTR_ERR(tfm) == -ENOENT)
-			return -ENOENT;
+			return 0;
 		pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
 		       driver, PTR_ERR(tfm));
 		return PTR_ERR(tfm);
@@ -4325,7 +4324,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
 	tfm = crypto_alloc_akcipher(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		if (PTR_ERR(tfm) == -ENOENT)
-			return -ENOENT;
+			return 0;
 		pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
 		       driver, PTR_ERR(tfm));
 		return PTR_ERR(tfm);
@@ -448,73 +448,31 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
 		},
 	},
 	{
-		/* Asus ExpertBook B1402CBA */
+		/* Asus ExpertBook B1402C* */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
+			DMI_MATCH(DMI_BOARD_NAME, "B1402C"),
 		},
 	},
 	{
-		/* Asus ExpertBook B1402CVA */
+		/* Asus ExpertBook B1502C* */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
+			DMI_MATCH(DMI_BOARD_NAME, "B1502C"),
 		},
 	},
 	{
-		/* Asus ExpertBook B1502CBA */
+		/* Asus ExpertBook B2402 (B2402CBA / B2402FBA / B2402CVA / B2402FVA) */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
+			DMI_MATCH(DMI_BOARD_NAME, "B2402"),
 		},
 	},
 	{
-		/* Asus ExpertBook B1502CGA */
+		/* Asus ExpertBook B2502 (B2502CBA / B2502FBA / B2502CVA / B2502FVA) */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B1502CGA"),
-		},
-	},
-	{
-		/* Asus ExpertBook B1502CVA */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B1502CVA"),
-		},
-	},
-	{
-		/* Asus ExpertBook B2402CBA */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
-		},
-	},
-	{
-		/* Asus ExpertBook B2402FBA */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B2402FBA"),
-		},
-	},
-	{
-		/* Asus ExpertBook B2502 */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
-		},
-	},
-	{
-		/* Asus ExpertBook B2502FBA */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B2502FBA"),
-		},
-	},
-	{
-		/* Asus ExpertBook B2502CVA */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "B2502CVA"),
+			DMI_MATCH(DMI_BOARD_NAME, "B2502"),
 		},
 	},
 	{
@@ -532,24 +490,10 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
 		},
 	},
 	{
-		/* Asus Vivobook Pro N6506MV */
+		/* Asus Vivobook Pro N6506M* */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "N6506MV"),
-		},
-	},
-	{
-		/* Asus Vivobook Pro N6506MU */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "N6506MU"),
-		},
-	},
-	{
-		/* Asus Vivobook Pro N6506MJ */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "N6506MJ"),
+			DMI_MATCH(DMI_BOARD_NAME, "N6506M"),
 		},
 	},
 	{
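The consolidation above works because DMI_MATCH() is a substring test: an entry whose board name is just "B1402C" matches B1402CBA, B1402CVA and any other B1402C* variant, so one table entry replaces several. A minimal sketch of such an entry (hypothetical table name; DMI_EXACT_MATCH() exists for cases where widening would be wrong):

static const struct dmi_system_id board_family_override[] = {
	{
		/* DMI_MATCH() is a substring match, so "B1402C" covers all B1402C* boards */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BOARD_NAME, "B1402C"),
		},
	},
	{ }	/* terminating empty entry */
};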
@@ -4099,10 +4099,20 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)

 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

-	/* Set all devices attached to the port in standby mode */
-	ata_for_each_link(link, ap, HOST_FIRST) {
-		ata_for_each_dev(dev, link, ENABLED)
-			ata_dev_power_set_standby(dev);
-	}
+	/*
+	 * We will reach this point for all of the PM events:
+	 * PM_EVENT_SUSPEND (if runtime pm, PM_EVENT_AUTO will also be set)
+	 * PM_EVENT_FREEZE, and PM_EVENT_HIBERNATE.
+	 *
+	 * We do not want to perform disk spin down for PM_EVENT_FREEZE.
+	 * (Spin down will be performed by the subsequent PM_EVENT_HIBERNATE.)
+	 */
+	if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) {
+		/* Set all devices attached to the port in standby mode */
+		ata_for_each_link(link, ap, HOST_FIRST) {
+			ata_for_each_dev(dev, link, ENABLED)
+				ata_dev_power_set_standby(dev);
+		}
+	}

 	/*
@@ -195,6 +195,7 @@ int dev_pm_domain_attach_list(struct device *dev,
 	struct device *pd_dev = NULL;
 	int ret, i, num_pds = 0;
 	bool by_id = true;
+	size_t size;
 	u32 pd_flags = data ? data->pd_flags : 0;
 	u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
 			 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
@@ -217,19 +218,17 @@ int dev_pm_domain_attach_list(struct device *dev,
 	if (num_pds <= 0)
 		return 0;

-	pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL);
+	pds = kzalloc(sizeof(*pds), GFP_KERNEL);
 	if (!pds)
 		return -ENOMEM;

-	pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs),
-				    GFP_KERNEL);
-	if (!pds->pd_devs)
-		return -ENOMEM;
-
-	pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links),
-				     GFP_KERNEL);
-	if (!pds->pd_links)
-		return -ENOMEM;
+	size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links);
+	pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
+	if (!pds->pd_devs) {
+		ret = -ENOMEM;
+		goto free_pds;
+	}
+	pds->pd_links = (void *)(pds->pd_devs + num_pds);

 	if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
 		link_flags |= DL_FLAG_RPM_ACTIVE;
@@ -272,6 +271,9 @@ int dev_pm_domain_attach_list(struct device *dev,
 		device_link_del(pds->pd_links[i]);
 		dev_pm_domain_detach(pds->pd_devs[i], true);
 	}
+	kfree(pds->pd_devs);
+free_pds:
+	kfree(pds);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
@@ -363,6 +365,9 @@ void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
 		device_link_del(list->pd_links[i]);
 		dev_pm_domain_detach(list->pd_devs[i], true);
 	}

+	kfree(list->pd_devs);
+	kfree(list);
 }
 EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
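Besides dropping devm, the change packs both pointer arrays into one allocation: the element size handed to kcalloc() is the sum of the two pointer sizes, and pd_links simply points past the first num_pds device pointers. A standalone sketch of the layout trick, with hypothetical names:

struct pd_list {
	struct device **pd_devs;
	struct device_link **pd_links;
};

static int pd_list_alloc_arrays(struct pd_list *pds, size_t num_pds)
{
	/* one block big enough for both arrays... */
	size_t size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links);

	pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
	if (!pds->pd_devs)
		return -ENOMEM;

	/* ...with the second array starting right after the first */
	pds->pd_links = (void *)(pds->pd_devs + num_pds);
	return 0;
}

A single kfree(pds->pd_devs) then releases both arrays, which is exactly what the new error path and dev_pm_domain_detach_list() do.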
@@ -947,7 +947,7 @@ struct ahash_alg mv_md5_alg = {
 		.base = {
 			.cra_name = "md5",
 			.cra_driver_name = "mv-md5",
-			.cra_priority = 300,
+			.cra_priority = 0,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_ALLOCATES_MEMORY |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1018,7 +1018,7 @@ struct ahash_alg mv_sha1_alg = {
 		.base = {
 			.cra_name = "sha1",
 			.cra_driver_name = "mv-sha1",
-			.cra_priority = 300,
+			.cra_priority = 0,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_ALLOCATES_MEMORY |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1092,7 +1092,7 @@ struct ahash_alg mv_sha256_alg = {
 		.base = {
 			.cra_name = "sha256",
 			.cra_driver_name = "mv-sha256",
-			.cra_priority = 300,
+			.cra_priority = 0,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_ALLOCATES_MEMORY |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1302,7 +1302,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
 		.base = {
 			.cra_name = "hmac(md5)",
 			.cra_driver_name = "mv-hmac-md5",
-			.cra_priority = 300,
+			.cra_priority = 0,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_ALLOCATES_MEMORY |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1373,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
 		.base = {
 			.cra_name = "hmac(sha1)",
 			.cra_driver_name = "mv-hmac-sha1",
-			.cra_priority = 300,
+			.cra_priority = 0,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_ALLOCATES_MEMORY |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1444,7 +1444,7 @@ struct ahash_alg mv_ahmac_sha256_alg = {
 		.base = {
 			.cra_name = "hmac(sha256)",
 			.cra_driver_name = "mv-hmac-sha256",
-			.cra_priority = 300,
+			.cra_priority = 0,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_ALLOCATES_MEMORY |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -406,6 +406,8 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
 	gpio->dcache[GPIO_BANK(offset)] = reg;

 	iowrite32(reg, addr);
+	/* Flush write */
+	ioread32(addr);
 }

 static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
@@ -1191,7 +1193,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 	if (!gpio_id)
 		return -EINVAL;

-	gpio->clk = of_clk_get(pdev->dev.of_node, 0);
+	gpio->clk = devm_clk_get_enabled(&pdev->dev, NULL);
 	if (IS_ERR(gpio->clk)) {
 		dev_warn(&pdev->dev,
 			"Failed to get clock from devicetree, debouncing disabled\n");
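The added ioread32() is the usual fix for posted MMIO writes: the write can sit in a bus buffer until something forces it out, and a read-back from the same device guarantees it has reached the hardware. The generic idiom, assuming a mapped register base:

	iowrite32(val, base + REG_OUT);	/* may be posted/buffered */
	(void)ioread32(base + REG_OUT);	/* read-back flushes the write; the value is discarded */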
@@ -1439,8 +1439,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 	list_add_tail(&vm->vm_list_node,
 			&(vm->process_info->vm_list_head));
 	vm->process_info->n_vms++;

-	*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
+	if (ef)
+		*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
 	mutex_unlock(&vm->process_info->lock);

 	return 0;
@@ -1702,12 +1702,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,

 	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
 						     &p->kgd_process_info,
-						     &ef);
+						     p->ef ? NULL : &ef);
 	if (ret) {
 		dev_err(dev->adev->dev, "Failed to create process VM object\n");
 		return ret;
 	}
-	RCU_INIT_POINTER(p->ef, ef);
+
+	if (!p->ef)
+		RCU_INIT_POINTER(p->ef, ef);

 	pdd->drm_priv = drm_file->private_data;

 	ret = kfd_process_device_reserve_ib_mem(pdd);
@@ -2972,10 +2972,11 @@ static int dm_suspend(void *handle)

 	hpd_rx_irq_work_suspend(dm);

-	if (adev->dm.dc->caps.ips_support)
-		dc_allow_idle_optimizations(adev->dm.dc, true);
-
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

+	if (dm->dc->caps.ips_support && adev->in_s0ix)
+		dc_allow_idle_optimizations(dm->dc, true);
+
+	dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
 	return 0;
@@ -5065,11 +5065,26 @@ static bool update_planes_and_stream_v3(struct dc *dc,
 	return true;
 }

+static void clear_update_flags(struct dc_surface_update *srf_updates,
+		int surface_count, struct dc_stream_state *stream)
+{
+	int i;
+
+	if (stream)
+		stream->update_flags.raw = 0;
+
+	for (i = 0; i < surface_count; i++)
+		if (srf_updates[i].surface)
+			srf_updates[i].surface->update_flags.raw = 0;
+}
+
 bool dc_update_planes_and_stream(struct dc *dc,
 		struct dc_surface_update *srf_updates, int surface_count,
 		struct dc_stream_state *stream,
 		struct dc_stream_update *stream_update)
 {
+	bool ret = false;
+
 	dc_exit_ips_for_hw_access(dc);
 	/*
 	 * update planes and stream version 3 separates FULL and FAST updates
@@ -5086,10 +5101,16 @@ bool dc_update_planes_and_stream(struct dc *dc,
 	 * features as they are now transparent to the new sequence.
 	 */
 	if (dc->ctx->dce_version >= DCN_VERSION_4_01)
-		return update_planes_and_stream_v3(dc, srf_updates,
+		ret = update_planes_and_stream_v3(dc, srf_updates,
 				surface_count, stream, stream_update);
-	return update_planes_and_stream_v2(dc, srf_updates,
+	else
+		ret = update_planes_and_stream_v2(dc, srf_updates,
 			surface_count, stream, stream_update);
+
+	if (ret)
+		clear_update_flags(srf_updates, surface_count, stream);
+
+	return ret;
 }

 void dc_commit_updates_for_stream(struct dc *dc,
@@ -5099,6 +5120,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
 		struct dc_stream_update *stream_update,
 		struct dc_state *state)
 {
+	bool ret = false;
+
 	dc_exit_ips_for_hw_access(dc);
 	/* TODO: Since change commit sequence can have a huge impact,
 	 * we decided to only enable it for DCN3x. However, as soon as
@@ -5106,17 +5129,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
 	 * the new sequence for all ASICs.
 	 */
 	if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
-		update_planes_and_stream_v3(dc, srf_updates, surface_count,
+		ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
 				stream, stream_update);
-		return;
-	}
-	if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
-		update_planes_and_stream_v2(dc, srf_updates, surface_count,
+	} else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+		ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
 				stream, stream_update);
-		return;
-	}
-	update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
-			stream_update, state);
+	} else
+		ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+				stream_update, state);
+
+	if (ret)
+		clear_update_flags(srf_updates, surface_count, stream);
 }

 uint8_t dc_get_current_stream_count(struct dc *dc)
@@ -60,7 +60,7 @@ struct vi_dpm_level {

 struct vi_dpm_table {
 	uint32_t count;
-	struct vi_dpm_level dpm_level[] __counted_by(count);
+	struct vi_dpm_level dpm_level[];
 };

 #define PCIE_PERF_REQ_REMOVE_REGISTRY   0
@@ -91,7 +91,7 @@ struct phm_set_power_state_input {

 struct phm_clock_array {
 	uint32_t count;
-	uint32_t values[] __counted_by(count);
+	uint32_t values[];
 };

 struct phm_clock_voltage_dependency_record {
@@ -123,7 +123,7 @@ struct phm_acpclock_voltage_dependency_record {

 struct phm_clock_voltage_dependency_table {
 	uint32_t count;
-	struct phm_clock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_clock_voltage_dependency_record entries[];
 };

 struct phm_phase_shedding_limits_record {
@@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record {

 struct phm_uvd_clock_voltage_dependency_table {
 	uint8_t count;
-	struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_uvd_clock_voltage_dependency_record entries[];
 };

 struct phm_acp_clock_voltage_dependency_record {
@@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record {

 struct phm_acp_clock_voltage_dependency_table {
 	uint32_t count;
-	struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_acp_clock_voltage_dependency_record entries[];
 };

 struct phm_vce_clock_voltage_dependency_record {
@@ -161,32 +161,32 @@ struct phm_vce_clock_voltage_dependency_record {

 struct phm_phase_shedding_limits_table {
 	uint32_t count;
-	struct phm_phase_shedding_limits_record entries[] __counted_by(count);
+	struct phm_phase_shedding_limits_record entries[];
 };

 struct phm_vceclock_voltage_dependency_table {
 	uint8_t count;
-	struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_vceclock_voltage_dependency_record entries[];
 };

 struct phm_uvdclock_voltage_dependency_table {
 	uint8_t count;
-	struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_uvdclock_voltage_dependency_record entries[];
 };

 struct phm_samuclock_voltage_dependency_table {
 	uint8_t count;
-	struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_samuclock_voltage_dependency_record entries[];
 };

 struct phm_acpclock_voltage_dependency_table {
 	uint32_t count;
-	struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_acpclock_voltage_dependency_record entries[];
 };

 struct phm_vce_clock_voltage_dependency_table {
 	uint8_t count;
-	struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_vce_clock_voltage_dependency_record entries[];
 };

@@ -393,7 +393,7 @@ union phm_cac_leakage_record {

 struct phm_cac_leakage_table {
 	uint32_t count;
-	union phm_cac_leakage_record entries[] __counted_by(count);
+	union phm_cac_leakage_record entries[];
 };

 struct phm_samu_clock_voltage_dependency_record {
@@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record {

 struct phm_samu_clock_voltage_dependency_table {
 	uint8_t count;
-	struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count);
+	struct phm_samu_clock_voltage_dependency_record entries[];
 };

 struct phm_cac_tdp_table {
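__counted_by(count) tells FORTIFY_SOURCE/UBSAN to bounds-check the flexible array against the named member, but that is only sound when the counter is assigned before the array is touched; these tables are not filled in that order, hence the annotations come off. A sketch of a layout where the annotation is safe (hypothetical struct):

struct example_table {
	u32 count;
	u32 values[] __counted_by(count);	/* checked against ->count */
};

struct example_table *t = kzalloc(struct_size(t, values, n), GFP_KERNEL);
if (t) {
	t->count = n;		/* the counter must be valid first */
	t->values[0] = 42;	/* in-bounds access: 0 < t->count */
}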
@@ -50,7 +50,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 	if (!fb_helper->dev)
 		return;

-	fb_deferred_io_cleanup(info);
+	if (info->fbdefio)
+		fb_deferred_io_cleanup(info);
 	drm_fb_helper_fini(fb_helper);

 	drm_client_buffer_vunmap(fb_helper->buffer);
@@ -1094,7 +1094,8 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
 	hdcp->value = value;
 	if (update_property) {
 		drm_connector_get(&connector->base);
-		queue_work(i915->unordered_wq, &hdcp->prop_work);
+		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+			drm_connector_put(&connector->base);
 	}
 }

@@ -2524,7 +2525,8 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 		mutex_lock(&hdcp->mutex);
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		drm_connector_get(&connector->base);
-		queue_work(i915->unordered_wq, &hdcp->prop_work);
+		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+			drm_connector_put(&connector->base);
 		mutex_unlock(&hdcp->mutex);
 	}

@@ -2541,7 +2543,9 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 	 */
 	if (!desired_and_not_enabled && !content_protection_type_changed) {
 		drm_connector_get(&connector->base);
-		queue_work(i915->unordered_wq, &hdcp->prop_work);
+		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+			drm_connector_put(&connector->base);
+
 	}
 }
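All three hunks fix the same leak: queue_work() returns false when the work item is already pending, and in that case the reference taken for the new submission must be dropped right away, because the pending work will only put the single reference it already owns. The rule, reduced to a generic kref sketch:

	kref_get(&obj->refcount);			/* reference handed to the work item */
	if (!queue_work(wq, &obj->work))		/* false: work was already queued */
		kref_put(&obj->refcount, obj_release);	/* so drop our extra reference */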
@@ -210,7 +210,7 @@ struct nvkm_gsp {
 	} *rm;

 	struct {
-		struct mutex mutex;;
+		struct mutex mutex;
 		struct idr idr;
 	} client_id;
@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
 		goto done;

-	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+	dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
 	if (!dpage)
 		goto done;
@@ -331,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
 		return;
 	}

-	ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+	ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
 	if (ret)
 		NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 }
@@ -228,10 +228,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
 {
 	struct drm_device *dev = radeon_connector->base.dev;
 	struct radeon_device *rdev = dev->dev_private;
-	int ret;

 	radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
-	radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
 	radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev;
 	if (ASIC_IS_DCE5(rdev)) {
 		if (radeon_auxch)
@@ -242,11 +240,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
 		radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
 	}

-	ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
-	if (!ret)
-		radeon_connector->ddc_bus->has_aux = true;
-
-	WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
+	drm_dp_aux_init(&radeon_connector->ddc_bus->aux);
+	radeon_connector->ddc_bus->has_aux = true;
 }

 /***** general DP utility functions *****/
@@ -1786,6 +1786,20 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector
 	return MODE_OK;
 }

+static int
+radeon_connector_late_register(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int r = 0;
+
+	if (radeon_connector->ddc_bus->has_aux) {
+		radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
+		r = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
+	}
+
+	return r;
+}
+
 static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
 	.get_modes = radeon_dp_get_modes,
 	.mode_valid = radeon_dp_mode_valid,
@@ -1800,6 +1814,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
 	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
+	.late_register = radeon_connector_late_register,
 };

 static const struct drm_connector_funcs radeon_edp_connector_funcs = {
@@ -1810,6 +1825,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
 	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
+	.late_register = radeon_connector_late_register,
 };

 static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
@@ -1820,6 +1836,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
 	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
+	.late_register = radeon_connector_late_register,
 };

 void
@@ -44,8 +44,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int radeon_gem_prime_pin(struct drm_gem_object *obj);
 void radeon_gem_prime_unpin(struct drm_gem_object *obj);

-const struct drm_gem_object_funcs radeon_gem_object_funcs;
-
 static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -132,7 +130,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
 		return r;
 	}
 	*obj = &robj->tbo.base;
-	(*obj)->funcs = &radeon_gem_object_funcs;
 	robj->pid = task_pid_nr(current);

 	mutex_lock(&rdev->gem.mutex);
@@ -151,6 +151,7 @@ int radeon_bo_create(struct radeon_device *rdev,
 	if (bo == NULL)
 		return -ENOMEM;
 	drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
+	bo->tbo.base.funcs = &radeon_gem_object_funcs;
 	bo->rdev = rdev;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
@@ -87,6 +87,12 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"

+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+	.name = "drm_sched_lockdep_map"
+};
+#endif
+
 #define to_drm_sched_job(sched_job)		\
 	container_of((sched_job), struct drm_sched_job, queue_node)

@@ -1269,7 +1275,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		sched->submit_wq = submit_wq;
 		sched->own_submit_wq = false;
 	} else {
+#ifdef CONFIG_LOCKDEP
+		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
+								       &drm_sched_lockdep_map);
+#else
 		sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#endif
 		if (!sched->submit_wq)
 			return -ENOMEM;
@@ -46,7 +46,6 @@ struct gr3d {
 	unsigned int nclocks;
 	struct reset_control_bulk_data resets[RST_GR3D_MAX];
 	unsigned int nresets;
-	struct dev_pm_domain_list *pd_list;

 	DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
 };
@@ -370,12 +369,18 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
 	return 0;
 }

+static void gr3d_del_link(void *link)
+{
+	device_link_del(link);
+}
+
 static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
 {
-	struct dev_pm_domain_attach_data pd_data = {
-		.pd_names = (const char *[]) { "3d0", "3d1" },
-		.num_pd_names = 2,
-	};
+	static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
+	const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+	struct device **opp_virt_devs, *pd_dev;
+	struct device_link *link;
+	unsigned int i;
 	int err;

 	err = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -409,10 +414,29 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
 	if (dev->pm_domain)
 		return 0;

-	err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
-	if (err < 0)
+	err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
+	if (err)
 		return err;

+	for (i = 0; opp_genpd_names[i]; i++) {
+		pd_dev = opp_virt_devs[i];
+		if (!pd_dev) {
+			dev_err(dev, "failed to get %s power domain\n",
+				opp_genpd_names[i]);
+			return -EINVAL;
+		}
+
+		link = device_link_add(dev, pd_dev, link_flags);
+		if (!link) {
+			dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
+			return -EINVAL;
+		}
+
+		err = devm_add_action_or_reset(dev, gr3d_del_link, link);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }

@@ -503,13 +527,13 @@ static int gr3d_probe(struct platform_device *pdev)

 	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
 	if (err)
-		goto err;
+		return err;

 	err = host1x_client_register(&gr3d->client.base);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
-		goto err;
+		return err;
 	}

 	/* initialize address register map */
@@ -517,9 +541,6 @@ static int gr3d_probe(struct platform_device *pdev)
 		set_bit(gr3d_addr_regs[i], gr3d->addr_regs);

 	return 0;
-err:
-	dev_pm_domain_detach_list(gr3d->pd_list);
-	return err;
 }

 static void gr3d_remove(struct platform_device *pdev)
@@ -528,7 +549,6 @@ static void gr3d_remove(struct platform_device *pdev)

 	pm_runtime_disable(&pdev->dev);
 	host1x_client_unregister(&gr3d->client.base);
-	dev_pm_domain_detach_list(gr3d->pd_list);
 }

 static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
@@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
 static int v3d_perfmon_idr_del(int id, void *elem, void *data)
 {
 	struct v3d_perfmon *perfmon = elem;
+	struct v3d_dev *v3d = (struct v3d_dev *)data;
+
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == v3d->active_perfmon)
+		v3d_perfmon_stop(v3d, perfmon, false);

 	v3d_perfmon_put(perfmon);

@@ -314,8 +319,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)

 void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
 {
+	struct v3d_dev *v3d = v3d_priv->v3d;
+
 	mutex_lock(&v3d_priv->perfmon.lock);
-	idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
+	idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
 	idr_destroy(&v3d_priv->perfmon.idr);
 	mutex_unlock(&v3d_priv->perfmon.lock);
 	mutex_destroy(&v3d_priv->perfmon.lock);
@@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
 static int vc4_perfmon_idr_del(int id, void *elem, void *data)
 {
 	struct vc4_perfmon *perfmon = elem;
+	struct vc4_dev *vc4 = (struct vc4_dev *)data;
+
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == vc4->active_perfmon)
+		vc4_perfmon_stop(vc4, perfmon, false);

 	vc4_perfmon_put(perfmon);

@@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
 		return;

 	mutex_lock(&vc4file->perfmon.lock);
-	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
+	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
 	idr_destroy(&vc4file->perfmon.idr);
 	mutex_unlock(&vc4file->perfmon.lock);
 	mutex_destroy(&vc4file->perfmon.lock);
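Both perfmon fixes pass the device pointer through idr_for_each()'s third argument instead of NULL, so the per-entry callback can stop the active perfmon before dropping it. The callback shape idr_for_each() expects, as a generic sketch with hypothetical names:

/* invoked once per IDR entry; 'data' is the caller's third argument */
static int perfmon_idr_del(int id, void *elem, void *data)
{
	struct perfmon *perfmon = elem;
	struct dev_priv *priv = data;	/* was NULL before these fixes */

	if (perfmon == priv->active_perfmon)
		perfmon_stop(priv, perfmon, false);

	perfmon_put(perfmon);
	return 0;
}

/* usage: idr_for_each(&priv->perfmon_idr, perfmon_idr_del, priv); */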
@@ -187,7 +187,7 @@ void xe_debugfs_register(struct xe_device *xe)
 	debugfs_create_file("forcewake_all", 0400, root, xe,
 			    &forcewake_all_fops);

-	debugfs_create_file("wedged_mode", 0400, root, xe,
+	debugfs_create_file("wedged_mode", 0600, root, xe,
 			    &wedged_mode_fops);

 	for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
@@ -874,7 +874,9 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
 	int ret = 0;

 	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
-	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) && XE_WA(gt, 22019338487))
+	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
+	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
+	    XE_WA(gt, 22019338487))
 		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

 	return ret;
@@ -667,16 +667,12 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
 		num_g2h = 1;

 		if (g2h_fence_needs_alloc(g2h_fence)) {
-			void *ptr;
-
 			g2h_fence->seqno = next_ct_seqno(ct, true);
-			ptr = xa_store(&ct->fence_lookup,
-				       g2h_fence->seqno,
-				       g2h_fence, GFP_ATOMIC);
-			if (IS_ERR(ptr)) {
-				ret = PTR_ERR(ptr);
+			ret = xa_err(xa_store(&ct->fence_lookup,
+					      g2h_fence->seqno, g2h_fence,
+					      GFP_ATOMIC));
+			if (ret)
 				goto out;
-			}
 		}

 		seqno = g2h_fence->seqno;
@@ -879,14 +875,11 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 retry_same_fence:
 	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
 	if (unlikely(ret == -ENOMEM)) {
-		void *ptr;
-
 		/* Retry allocation /w GFP_KERNEL */
-		ptr = xa_store(&ct->fence_lookup,
-			       g2h_fence.seqno,
-			       &g2h_fence, GFP_KERNEL);
-		if (IS_ERR(ptr))
-			return PTR_ERR(ptr);
+		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
+				      &g2h_fence, GFP_KERNEL));
+		if (ret)
+			return ret;

 		goto retry_same_fence;
 	} else if (unlikely(ret)) {
@@ -903,16 +896,26 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 	}

 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+
+	/*
+	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
+	 * the stack, since we have no clue if it will fire after the timeout before we can erase
+	 * from the xa. Also we have some dependent loads and stores below for which we need the
+	 * correct ordering, and we lack the needed barriers.
+	 */
+	mutex_lock(&ct->lock);
 	if (!ret) {
-		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x",
-			  g2h_fence.seqno, action[0]);
+		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
+			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
 		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+		mutex_unlock(&ct->lock);
 		return -ETIME;
 	}

 	if (g2h_fence.retry) {
 		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
 			  action[0], g2h_fence.reason);
+		mutex_unlock(&ct->lock);
 		goto retry;
 	}
 	if (g2h_fence.fail) {
@@ -921,7 +924,12 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 		ret = -EIO;
 	}

-	return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
+	if (ret > 0)
+		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+
+	mutex_unlock(&ct->lock);
+
+	return ret;
 }

 /**
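The xa_err() conversions here (and in the GuC submit code below) fix a real error-handling bug: xa_store() does not return an ERR_PTR on failure, it returns the entry previously stored at that index (or an internal error entry), so IS_ERR()/PTR_ERR() checks are wrong. xa_err() is the documented way to extract an errno from its return value. Side by side:

	/* wrong: xa_store() never returns an ERR_PTR */
	ptr = xa_store(&xa, index, entry, GFP_KERNEL);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* right: decode the return value with xa_err() */
	ret = xa_err(xa_store(&xa, index, entry, GFP_KERNEL));
	if (ret)
		return ret;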
@@ -224,80 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
 		   EXEC_QUEUE_STATE_BANNED));
 }

-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-	int i;
-
-	for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
-		guc->submission_state.submit_wq_pool[i] =
-			alloc_ordered_workqueue("submit_wq", 0);
-		if (!guc->submission_state.submit_wq_pool[i])
-			goto err_free;
-	}
-
-	return 0;
-
-err_free:
-	while (i)
-		destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
-	return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-	int i;
-
-	for (i = 0; i < NUM_SUBMIT_WQ; ++i)
-		destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-	int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
-	return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-	return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-	return NULL;
-}
-#endif
-
 static void xe_guc_submit_fini(struct xe_guc *guc)
 {
 	struct xe_device *xe = guc_to_xe(guc);
 	struct xe_gt *gt = guc_to_gt(guc);
 	int ret;

 	ret = wait_event_timeout(guc->submission_state.fini_wq,
 				 xa_empty(&guc->submission_state.exec_queue_lookup),
 				 HZ * 5);

 	drain_workqueue(xe->destroy_wq);

 	xe_gt_assert(gt, ret);
 }

 static void guc_submit_fini(struct drm_device *drm, void *arg)
 {
 	struct xe_guc *guc = arg;

 	xe_guc_submit_fini(guc);
 	xa_destroy(&guc->submission_state.exec_queue_lookup);
-	free_submit_wq(guc);
 }

 static void guc_submit_wedged_fini(void *arg)
@@ -359,10 +290,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
 	if (err)
 		return err;

-	err = alloc_submit_wq(guc);
-	if (err)
-		return err;
-
 	gt->exec_queue_ops = &guc_exec_queue_ops;

 	xa_init(&guc->submission_state.exec_queue_lookup);
@@ -393,7 +320,6 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 {
 	int ret;
-	void *ptr;
 	int i;

 	/*
@@ -413,12 +339,10 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 	q->guc->id = ret;

 	for (i = 0; i < q->width; ++i) {
-		ptr = xa_store(&guc->submission_state.exec_queue_lookup,
-			       q->guc->id + i, q, GFP_NOWAIT);
-		if (IS_ERR(ptr)) {
-			ret = PTR_ERR(ptr);
+		ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
+				      q->guc->id + i, q, GFP_NOWAIT));
+		if (ret)
 			goto err_release;
-		}
 	}

 	return 0;
@@ -1482,8 +1406,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
 		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
-			    get_submit_wq(guc),
-			    q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+			    NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
 			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
 			    q->name, gt_to_xe(q->gt)->drm.dev);
 	if (err)
@@ -72,13 +72,6 @@ struct xe_guc {
 		atomic_t stopped;
 		/** @submission_state.lock: protects submission state */
 		struct mutex lock;
-#ifdef CONFIG_PROVE_LOCKING
-#define NUM_SUBMIT_WQ	256
-		/** @submission_state.submit_wq_pool: submission ordered workqueues pool */
-		struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
-		/** @submission_state.submit_wq_idx: submission ordered workqueue index */
-		int submit_wq_idx;
-#endif
 		/** @submission_state.enabled: submission is enabled */
 		bool enabled;
 		/** @submission_state.fini_wq: submit fini wait queue */
@@ -236,9 +236,9 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
 	cl_data->in_data = in_data;

 	for (i = 0; i < cl_data->num_hid_devices; i++) {
-		in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
-								  &cl_data->sensor_dma_addr[i],
-								  GFP_KERNEL);
+		in_data->sensor_virt_addr[i] = dmam_alloc_coherent(dev, sizeof(int) * 8,
+								   &cl_data->sensor_dma_addr[i],
+								   GFP_KERNEL);
 		if (!in_data->sensor_virt_addr[i]) {
 			rc = -ENOMEM;
 			goto cleanup;
@@ -331,7 +331,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
 int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
 {
 	struct amdtp_cl_data *cl_data = privdata->cl_data;
-	struct amd_input_data *in_data = cl_data->in_data;
 	int i, status;

 	for (i = 0; i < cl_data->num_hid_devices; i++) {
@@ -351,12 +350,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
 	cancel_delayed_work_sync(&cl_data->work_buffer);
 	amdtp_hid_remove(cl_data);

-	for (i = 0; i < cl_data->num_hid_devices; i++) {
-		if (in_data->sensor_virt_addr[i]) {
-			dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
-					  in_data->sensor_virt_addr[i],
-					  cl_data->sensor_dma_addr[i]);
-		}
-	}
 	return 0;
 }
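Switching to dmam_alloc_coherent() ties each buffer's lifetime to the struct device, which is why the whole dma_free_coherent() loop in the deinit path can simply be deleted. The managed pattern in general:

	/* released automatically when the device is unbound; no explicit free */
	buf = dmam_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;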
@@ -1036,6 +1036,8 @@
 #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES	0xc056
 #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES	0xc057
 #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES	0xc058
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES	0x430c
+#define USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES	0x431e

 #define USB_VENDOR_ID_PANASONIC		0x04da
 #define USB_DEVICE_ID_PANABOARD_UBT780	0x1044
@@ -2026,6 +2026,10 @@ static const struct hid_device_id mt_devices[] = {
 	  HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
 		USB_VENDOR_ID_ELAN, 0x3148) },

+	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+	  HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+		USB_VENDOR_ID_ELAN, 0x32ae) },
+
 	/* Elitegroup panel */
 	{ .driver_data = MT_CLS_SERIAL,
 		MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
@@ -2095,6 +2099,11 @@ static const struct hid_device_id mt_devices[] = {
 	  HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
 		0x347d, 0x7853) },

+	/* HONOR MagicBook Art 14 touchpad */
+	{ .driver_data = MT_CLS_VTL,
+	  HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+		0x35cc, 0x0104) },
+
 	/* Ilitek dual touch panel */
 	{ .driver_data = MT_CLS_NSMU,
 		MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
@@ -38,8 +38,10 @@
 	(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)

 #define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
+#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1)

 #define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
+#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */

 struct plt_drv_data {
 	unsigned long device_type;
@@ -137,6 +139,21 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
 		drv_data->last_volume_key_ts = cur_ts;
 	}
+	if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) {
+		unsigned long prev_ts, cur_ts;
+
+		/* Usages are filtered in plantronics_usages. */
+
+		if (!value) /* Handle key presses only. */
+			return 0;
+
+		prev_ts = drv_data->last_volume_key_ts;
+		cur_ts = jiffies;
+		if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
+			return 1; /* Ignore the followed opposite volume key. */
+
+		drv_data->last_volume_key_ts = cur_ts;
+	}

 	return 0;
 }
@@ -210,6 +227,12 @@ static const struct hid_device_id plantronics_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
 					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
 		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES),
+		.driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+					 USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES),
+		.driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
 	{ }
 };
@@ -635,7 +635,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
 				  const struct firmware *fw,
 				  const struct shim_fw_info fw_info)
 {
-	int rv;
+	int rv = 0;
 	void *dma_buf;
 	dma_addr_t dma_buf_phy;
 	u32 fragment_offset, fragment_size, payload_max_size;
@@ -2567,6 +2567,8 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
 		/* Going into range select tool */
 		if (wacom_wac->hid_data.invert_state)
 			wacom_wac->tool[0] = BTN_TOOL_RUBBER;
+		else if (wacom_wac->features.quirks & WACOM_QUIRK_AESPEN)
+			wacom_wac->tool[0] = BTN_TOOL_PEN;
 		else if (wacom_wac->id[0])
 			wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]);
 		else
@@ -162,6 +162,7 @@ config SENSORS_ADM9240
 	tristate "Analog Devices ADM9240 and compatibles"
 	depends on I2C
 	select HWMON_VID
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for Analog Devices ADM9240,
 	  Dallas DS1780, National Semiconductor LM81 sensor chips.
@@ -223,6 +224,7 @@ config SENSORS_ADT7462
 config SENSORS_ADT7470
 	tristate "Analog Devices ADT7470"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for the Analog Devices
 	  ADT7470 temperature monitoring chips.
@@ -999,6 +1001,7 @@ config SENSORS_LTC2990
 config SENSORS_LTC2991
 	tristate "Analog Devices LTC2991"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for Analog Devices LTC2991
 	  Octal I2C Voltage, Current, and Temperature Monitor. The LTC2991
@@ -1146,6 +1149,7 @@ config SENSORS_MAX1619
 config SENSORS_MAX1668
 	tristate "Maxim MAX1668 and compatibles"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for MAX1668, MAX1989 and
 	  MAX1805 chips.
@@ -1275,6 +1279,7 @@ config SENSORS_MAX31790
 config SENSORS_MC34VR500
 	tristate "NXP MC34VR500 hardware monitoring driver"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for the temperature and input
 	  voltage sensors of the NXP MC34VR500.
@@ -2312,6 +2317,7 @@ config SENSORS_TMP464
 config SENSORS_TMP513
 	tristate "Texas Instruments TMP513 and compatibles"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for Texas Instruments TMP512,
 	  and TMP513 temperature and power supply sensor chips.
@@ -1735,11 +1735,10 @@ static int adt7475_pwm_properties_parse_args(struct fwnode_handle *fwnode,
 static int adt7475_fan_pwm_config(struct i2c_client *client)
 {
 	struct adt7475_data *data = i2c_get_clientdata(client);
-	struct fwnode_handle *child;
 	struct adt7475_pwm_config cfg = {};
 	int ret;

-	device_for_each_child_node(&client->dev, child) {
+	device_for_each_child_node_scoped(&client->dev, child) {
 		if (!fwnode_property_present(child, "pwms"))
 			continue;
@@ -358,7 +358,7 @@ static const struct m10bmc_sdata n6000bmc_temp_tbl[] = {
{ 0x4f0, 0x4f4, 0x4f8, 0x52c, 0x0, 500, "Board Top Near FPGA Temperature" },
{ 0x4fc, 0x500, 0x504, 0x52c, 0x0, 500, "Board Bottom Near CVL Temperature" },
{ 0x508, 0x50c, 0x510, 0x52c, 0x0, 500, "Board Top East Near VRs Temperature" },
{ 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "Columbiaville Die Temperature" },
{ 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "CVL Die Temperature" },
{ 0x520, 0x524, 0x528, 0x52c, 0x0, 500, "Board Rear Side Temperature" },
{ 0x530, 0x534, 0x538, 0x52c, 0x0, 500, "Board Front Side Temperature" },
{ 0x53c, 0x540, 0x544, 0x0, 0x0, 500, "QSFP1 Case Temperature" },
@@ -269,6 +269,8 @@ rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
break;
#endif
}
if (!ret && dev && is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
return ret ? ERR_PTR(ret) : dev;
}
@@ -2816,6 +2816,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
nlh = nlmsg_put(skb, 0, 0,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_MONITOR),
0, 0);
if (!nlh)
goto err_free;

switch (type) {
case RDMA_REGISTER_EVENT:
@@ -366,7 +366,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
}
if (rdev->pacing.dbr_pacing)
if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
bnxt_re_copy_db_pacing_stats(rdev, stats);
}
@@ -1307,7 +1307,11 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
0 : BNXT_QPLIB_RESERVED_QP_WRS;
entries = bnxt_re_init_depth(entries + diff + 1, uctx);
sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
else
sq->max_sw_wqe = sq->max_wqe;

}
sq->q_full_delta = diff + 1;
/*
@@ -188,8 +188,11 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)

bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
if (rc)
if (rc) {
kfree(rdev->chip_ctx);
rdev->chip_ctx = NULL;
return rc;
}

if (bnxt_qplib_determine_atomics(en_dev->pdev))
ibdev_info(&rdev->ibdev,
@@ -531,6 +534,7 @@ static bool is_dbr_fifo_full(struct bnxt_re_dev *rdev)
static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
u32 retry_fifo_check = 1000;
u32 fifo_occup;

/* loop shouldn't run infintely as the occupancy usually goes
@@ -544,6 +548,14 @@ static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)

if (fifo_occup < pacing_data->pacing_th)
break;
if (!retry_fifo_check--) {
dev_info_once(rdev_to_dev(rdev),
"%s: fifo_occup = 0x%xfifo_max_depth = 0x%x pacing_th = 0x%x\n",
__func__, fifo_occup, pacing_data->fifo_max_depth,
pacing_data->pacing_th);
break;
}

}
}

@@ -957,7 +969,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
struct bnxt_en_dev *en_dev)
{
struct bnxt_re_dev *rdev;
@@ -973,6 +985,7 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
rdev->nb.notifier_call = NULL;
rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
rdev->adev = adev;
rdev->id = rdev->en_dev->pdev->devfn;
INIT_LIST_HEAD(&rdev->qp_list);
mutex_init(&rdev->qp_lock);
@@ -1025,12 +1038,15 @@ static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct bnxt_re_qp *qp)
{
struct bnxt_re_srq *srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
qplib_srq);
struct creq_qp_error_notification *err_event;
struct bnxt_re_srq *srq = NULL;
struct ib_event event = {};
unsigned int flags;

if (qp->qplib_qp.srq)
srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
qplib_srq);

if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
@@ -1258,15 +1274,9 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
{
struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
qplib_cq);
u32 *cq_ptr;

if (cq->ib_cq.comp_handler) {
if (cq->uctx_cq_page) {
cq_ptr = (u32 *)cq->uctx_cq_page;
*cq_ptr = cq->qplib_cq.toggle;
}
if (cq->ib_cq.comp_handler)
(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
}

return 0;
}
@@ -1823,7 +1833,6 @@ static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
*/
rtnl_lock();
en_info->rdev = rdev;
rdev->adev = adev;
rtnl_unlock();
}

@@ -1840,7 +1849,7 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
en_dev = en_info->en_dev;

rdev = bnxt_re_dev_add(aux_priv, en_dev);
rdev = bnxt_re_dev_add(adev, en_dev);
if (!rdev || !rdev_to_dev(rdev)) {
rc = -ENOMEM;
goto exit;
@@ -1865,12 +1874,14 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
rdev->nb.notifier_call = NULL;
pr_err("%s: Cannot register to netdevice_notifier",
ROCE_DRV_MODULE_NAME);
return rc;
goto re_dev_unreg;
}
bnxt_re_setup_cc(rdev, true);

return 0;

re_dev_unreg:
ib_unregister_device(&rdev->ibdev);
re_dev_uninit:
bnxt_re_update_en_info_rdev(NULL, en_info, adev);
bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
@@ -2014,15 +2025,7 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
auxiliary_set_drvdata(adev, en_info);

rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
if (rc)
goto err;
mutex_unlock(&bnxt_re_mutex);
return 0;

err:
mutex_unlock(&bnxt_re_mutex);
bnxt_re_remove(adev);

return rc;
}
@@ -327,6 +327,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
case NQ_BASE_TYPE_CQ_NOTIFICATION:
{
struct nq_cn *nqcne = (struct nq_cn *)nqe;
struct bnxt_re_cq *cq_p;

q_handle = le32_to_cpu(nqcne->cq_handle_low);
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
@@ -337,6 +338,10 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
cq->toggle = (le16_to_cpu(nqe->info10_type) &
NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
cq->dbinfo.toggle = cq->toggle;
cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
if (cq_p->uctx_cq_page)
*((u32 *)cq_p->uctx_cq_page) = cq->toggle;

bnxt_qplib_armen_db(&cq->dbinfo,
DBC_DBC_TYPE_CQ_ARMENA);
spin_lock_bh(&cq->compl_lock);
@@ -170,7 +170,7 @@ struct bnxt_qplib_swqe {
};
u32 q_key;
u32 dst_qp;
u16 avid;
u32 avid;
} send;

/* Send Raw Ethernet and QP1 */
@@ -525,7 +525,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/* failed with status */
dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
cookie, opcode, evnt->status);
rc = -EFAULT;
rc = -EIO;
}

return rc;
@@ -244,6 +244,8 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
sginfo.pgsize = npde * pg_size;
sginfo.npages = 1;
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
if (rc)
goto fail;

/* Alloc PBL pages */
sginfo.npages = npbl;
@@ -255,22 +257,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
if (hwq_attr->type == HWQ_TYPE_MR) {
/* For MR it is expected that we supply only 1 contigous
* page i.e only 1 entry in the PDL that will contain
* all the PBLs for the user supplied memory region
*/
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
i++)
dst_virt_ptr[0][i] = src_phys_ptr[i] |
flag;
} else {
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
i++)
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] |
PTU_PDE_VALID;
}
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;

/* Alloc or init PTEs */
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
hwq_attr->sginfo);
@@ -140,6 +140,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
attr->max_cq_sges = attr->max_qp_sges;
attr->max_mr = le32_to_cpu(sb->max_mr);
attr->max_mw = le32_to_cpu(sb->max_mw);
@@ -157,7 +159,14 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
/*
* Read the max gid supported by HW.
* For each entry in HW GID in HW table, we consume 2
* GID entries in the kernel GID table. So max_gid reported
* to stack can be up to twice the value reported by the HW, up to 256 gids.
*/
attr->max_sgid = le32_to_cpu(sb->max_gid);
attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
@@ -56,6 +56,7 @@ struct bnxt_qplib_dev_attr {
u32 max_qp_wqes;
u32 max_qp_sges;
u32 max_cq;
#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff
u32 max_cq_wqes;
u32 max_cq_sges;
u32 max_mr;
@@ -2086,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
err = -ENOMEM;
if (n->dev->flags & IFF_LOOPBACK) {
if (iptype == 4)
pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
else if (IS_ENABLED(CONFIG_IPV6))
for_each_netdev(&init_net, pdev) {
if (ipv6_chk_addr(&init_net,
@@ -2101,12 +2101,12 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
err = -ENODEV;
goto out;
}
if (is_vlan_dev(pdev))
pdev = vlan_dev_real_dev(pdev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, rt_tos2priority(tos));
if (!ep->l2t) {
dev_put(pdev);
if (!ep->l2t)
goto out;
}
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
@@ -2119,7 +2119,6 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
ep->rss_qid = cdev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
dev_put(pdev);
} else {
pdev = get_real_dev(n->dev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
@@ -3631,7 +3631,7 @@ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
/**
* irdma_accept - registered call for connection to be accepted
* @cm_id: cm information for passive connection
* @conn_param: accpet parameters
* @conn_param: accept parameters
*/
int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
@@ -331,6 +331,8 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
msg.msg_flags &= ~MSG_MORE;

tcp_rate_check_app_limited(sk);
if (!sendpage_ok(page[i]))
msg.msg_flags &= ~MSG_SPLICE_PAGES;
bvec_set_page(&bvec, page[i], bytes, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
@@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL");
static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */
static DEFINE_MUTEX(srpt_mc_mutex); /* Protects srpt_memory_caches. */
static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
@@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);

/* Type of the entries in srpt_memory_caches. */
struct srpt_memory_cache_entry {
refcount_t ref;
struct kmem_cache *c;
};

static struct kmem_cache *srpt_cache_get(unsigned int object_size)
{
struct srpt_memory_cache_entry *e;
char name[32];
void *res;

guard(mutex)(&srpt_mc_mutex);
e = xa_load(&srpt_memory_caches, object_size);
if (e) {
refcount_inc(&e->ref);
return e->c;
}
snprintf(name, sizeof(name), "srpt-%u", object_size);
e = kmalloc(sizeof(*e), GFP_KERNEL);
if (!e)
return NULL;
refcount_set(&e->ref, 1);
e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL);
if (!e->c)
goto free_entry;
res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
if (xa_is_err(res))
goto destroy_cache;
return e->c;

destroy_cache:
kmem_cache_destroy(e->c);

free_entry:
kfree(e);
return NULL;
}

static void srpt_cache_put(struct kmem_cache *c)
{
struct srpt_memory_cache_entry *e = NULL;
unsigned long object_size;

guard(mutex)(&srpt_mc_mutex);
xa_for_each(&srpt_memory_caches, object_size, e)
if (e->c == c)
break;
if (WARN_ON_ONCE(!e))
return;
if (!refcount_dec_and_test(&e->ref))
return;
WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e);
kmem_cache_destroy(e->c);
kfree(e);
}

/*
* The only allowed channel state changes are those that change the channel
* state into a state with a higher numerical value. Hence the new > prev test.
@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w)
ch->sport->sdev, ch->rq_size,
ch->rsp_buf_cache, DMA_TO_DEVICE);

kmem_cache_destroy(ch->rsp_buf_cache);
srpt_cache_put(ch->rsp_buf_cache);

srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
sdev, ch->rq_size,
ch->req_buf_cache, DMA_FROM_DEVICE);

kmem_cache_destroy(ch->req_buf_cache);
srpt_cache_put(ch->req_buf_cache);

kref_put(&ch->kref, srpt_free_ch);
}
@@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
INIT_LIST_HEAD(&ch->cmd_wait_list);
ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;

ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
512, 0, NULL);
ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size);
if (!ch->rsp_buf_cache)
goto free_ch;

@@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
alignment_offset = round_up(imm_data_offset, 512) -
imm_data_offset;
req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
512, 0, NULL);
ch->req_buf_cache = srpt_cache_get(req_sz);
if (!ch->req_buf_cache)
goto free_rsp_ring;

@@ -2478,7 +2535,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
ch->req_buf_cache, DMA_FROM_DEVICE);

free_recv_cache:
kmem_cache_destroy(ch->req_buf_cache);
srpt_cache_put(ch->req_buf_cache);

free_rsp_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2486,7 +2543,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
ch->rsp_buf_cache, DMA_TO_DEVICE);

free_rsp_cache:
kmem_cache_destroy(ch->rsp_buf_cache);
srpt_cache_put(ch->rsp_buf_cache);

free_ch:
if (rdma_cm_id)
@@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
sdev->srq_size, sdev->req_buf_cache,
DMA_FROM_DEVICE);
kmem_cache_destroy(sdev->req_buf_cache);
srpt_cache_put(sdev->req_buf_cache);
sdev->srq = NULL;
}

@@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
sdev->device->attrs.max_srq_wr, dev_name(&device->dev));

sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
srp_max_req_size, 0, 0, NULL);
sdev->req_buf_cache = srpt_cache_get(srp_max_req_size);
if (!sdev->req_buf_cache)
goto free_srq;

@@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
return 0;

free_cache:
kmem_cache_destroy(sdev->req_buf_cache);
srpt_cache_put(sdev->req_buf_cache);

free_srq:
ib_destroy_srq(srq);
@@ -388,7 +388,8 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,

blk_queue_rq_timeout(mq->queue, 60 * HZ);

dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
if (mmc_dev(host)->dma_parms)
dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
@@ -38,9 +38,8 @@ struct mvsd_host {
unsigned int xfer_mode;
unsigned int intr_en;
unsigned int ctrl;
bool use_pio;
struct sg_mapping_iter sg_miter;
unsigned int pio_size;
void *pio_ptr;
unsigned int sg_frags;
unsigned int ns_per_clk;
unsigned int clock;
@@ -115,18 +114,11 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
* data when the buffer is not aligned on a 64 byte
* boundary.
*/
unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */

if (data->flags & MMC_DATA_READ)
miter_flags |= SG_MITER_TO_SG;
else
miter_flags |= SG_MITER_FROM_SG;

host->pio_size = data->blocks * data->blksz;
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags);
host->pio_ptr = sg_virt(data->sg);
if (!nodma)
dev_dbg(host->dev, "fallback to PIO for data\n");
host->use_pio = true;
dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
host->pio_ptr, host->pio_size);
return 1;
} else {
dma_addr_t phys_addr;
@@ -137,7 +129,6 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
phys_addr = sg_dma_address(data->sg);
mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
host->use_pio = false;
return 0;
}
}
@@ -297,8 +288,8 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
{
void __iomem *iobase = host->base;

if (host->use_pio) {
sg_miter_stop(&host->sg_miter);
if (host->pio_ptr) {
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
@@ -353,12 +344,9 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
static irqreturn_t mvsd_irq(int irq, void *dev)
{
struct mvsd_host *host = dev;
struct sg_mapping_iter *sgm = &host->sg_miter;
void __iomem *iobase = host->base;
u32 intr_status, intr_done_mask;
int irq_handled = 0;
u16 *p;
int s;

intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
@@ -382,36 +370,15 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
spin_lock(&host->lock);

/* PIO handling, if needed. Messy business... */
if (host->use_pio) {
/*
* As we set sgm->consumed this always gives a valid buffer
* position.
*/
if (!sg_miter_next(sgm)) {
/* This should not happen */
dev_err(host->dev, "ran out of scatter segments\n");
spin_unlock(&host->lock);
host->intr_en &=
~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W |
MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
return IRQ_HANDLED;
}
p = sgm->addr;
s = sgm->length;
if (s > host->pio_size)
s = host->pio_size;
}

if (host->use_pio &&
if (host->pio_size &&
(intr_status & host->intr_en &
(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {

u16 *p = host->pio_ptr;
int s = host->pio_size;
while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
readsw(iobase + MVSD_FIFO, p, 16);
p += 16;
s -= 32;
sgm->consumed += 32;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
/*
@@ -424,7 +391,6 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
put_unaligned(mvsd_read(MVSD_FIFO), p++);
put_unaligned(mvsd_read(MVSD_FIFO), p++);
s -= 4;
sgm->consumed += 4;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
@@ -432,13 +398,10 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
val[0] = mvsd_read(MVSD_FIFO);
val[1] = mvsd_read(MVSD_FIFO);
memcpy(p, ((void *)&val) + 4 - s, s);
sgm->consumed += s;
s = 0;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
/* PIO transfer done */
host->pio_size -= sgm->consumed;
if (host->pio_size == 0) {
if (s == 0) {
host->intr_en &=
~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
@@ -450,10 +413,14 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
}
dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
s, intr_status, mvsd_read(MVSD_HW_STATE));
host->pio_ptr = p;
host->pio_size = s;
irq_handled = 1;
} else if (host->use_pio &&
} else if (host->pio_size &&
(intr_status & host->intr_en &
(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
u16 *p = host->pio_ptr;
int s = host->pio_size;
/*
* The TX_FIFO_8W bit is unreliable. When set, bursting
* 16 halfwords all at once in the FIFO drops data. Actually
@@ -464,7 +431,6 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
mvsd_write(MVSD_FIFO, get_unaligned(p++));
mvsd_write(MVSD_FIFO, get_unaligned(p++));
s -= 4;
sgm->consumed += 4;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
if (s < 4) {
@@ -473,13 +439,10 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
memcpy(((void *)&val) + 4 - s, p, s);
mvsd_write(MVSD_FIFO, val[0]);
mvsd_write(MVSD_FIFO, val[1]);
sgm->consumed += s;
s = 0;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
/* PIO transfer done */
host->pio_size -= sgm->consumed;
if (host->pio_size == 0) {
if (s == 0) {
host->intr_en &=
~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
@@ -487,6 +450,8 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
}
dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
s, intr_status, mvsd_read(MVSD_HW_STATE));
host->pio_ptr = p;
host->pio_size = s;
irq_handled = 1;
}
@@ -852,6 +852,14 @@ static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask)

sdhci_reset(host, mask);

/* The T-Head 1520 SoC does not comply with the SDHCI specification
* regarding the "Software Reset for CMD line should clear 'Command
* Complete' in the Normal Interrupt Status Register." Clear the bit
* here to compensate for this quirk.
*/
if (mask & SDHCI_RESET_CMD)
sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);

if (priv->flags & FLAG_IO_FIXED_1V8) {
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) {
@@ -6360,7 +6360,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
.num_internal_phys = 5,
.internal_phys_offset = 3,
.max_vid = 4095,
.max_vid = 8191,
.max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
@@ -851,7 +851,6 @@ static int vsc73xx_setup(struct dsa_switch *ds)

dev_info(vsc->dev, "set up the switch\n");

ds->untag_bridge_pvid = true;
ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
ds->fdb_isolation = true;
@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)

if (unlikely(skb->len > MAX_FRAME_SIZE)) {
dev->stats.tx_errors++;
goto out;
goto len_error;
}

/* Save skb pointer. */
@@ -575,6 +575,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
map_error:
if (net_ratelimit())
dev_warn(greth->dev, "Could not create TX DMA mapping\n");
len_error:
dev_kfree_skb(skb);
out:
return err;
@@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Rewind so we do not have a hole */
spb_index = intf->tx_spb_index;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1359,6 +1359,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
dev_kfree_skb_any(skb);
goto out;
}
@@ -930,9 +930,6 @@ static int macb_mdiobus_register(struct macb *bp)
return ret;
}

if (of_phy_is_fixed_link(np))
return mdiobus_register(bp->mii_bus);

/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
* old device tree that did not follow the best practices and did not
@@ -953,8 +950,19 @@ static int macb_mdiobus_register(struct macb *bp)

static int macb_mii_init(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
int err = -ENXIO;

/* With fixed-link, we don't need to register the MDIO bus,
* except if we have a child named "mdio" in the device tree.
* In that case, some devices may be attached to the MACB's MDIO bus.
*/
child = of_get_child_by_name(np, "mdio");
if (child)
of_node_put(child);
else if (of_phy_is_fixed_link(np))
return macb_mii_probe(bp->dev);

/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -902,6 +902,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)

if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
!test_bit(ENETC_TX_DOWN, &priv->flags) &&
(enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -1377,6 +1378,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
int xdp_tx_bd_cnt, i, k;
int xdp_tx_frm_cnt = 0;

if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
return -ENETDOWN;

enetc_lock_mdio();

tx_ring = priv->xdp_tx_ring[smp_processor_id()];
@@ -1521,7 +1525,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
&rx_ring->rx_swbd[rx_ring_first]);
enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
}
rx_ring->stats.xdp_drops++;
}

static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
@@ -1586,6 +1589,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
fallthrough;
case XDP_DROP:
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
rxbd = orig_rxbd;
@@ -1602,6 +1606,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
break;
}

xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
rx_ring,
orig_i, i);
@@ -2223,16 +2233,22 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}

static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;

for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}

static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;

for (i = 0; i < priv->num_tx_rings; i++)
enetc_enable_txbdr(hw, priv->tx_ring[i]);

for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}

static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
@@ -2251,16 +2267,22 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
}

static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;

for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}

static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;

for (i = 0; i < priv->num_tx_rings; i++)
enetc_disable_txbdr(hw, priv->tx_ring[i]);

for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}

static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
@@ -2460,9 +2482,13 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}

enetc_enable_bdrs(priv);
enetc_enable_tx_bdrs(priv);

enetc_enable_rx_bdrs(priv);

netif_tx_start_all_queues(ndev);

clear_bit(ENETC_TX_DOWN, &priv->flags);
}
EXPORT_SYMBOL_GPL(enetc_start);

@@ -2520,9 +2546,15 @@ void enetc_stop(struct net_device *ndev)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i;

set_bit(ENETC_TX_DOWN, &priv->flags);

netif_tx_stop_all_queues(ndev);

enetc_disable_bdrs(priv);
enetc_disable_rx_bdrs(priv);

enetc_wait_bdrs(priv);

enetc_disable_tx_bdrs(priv);

for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
@@ -2533,8 +2565,6 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}

enetc_wait_bdrs(priv);

enetc_clear_interrupts(priv);
}
EXPORT_SYMBOL_GPL(enetc_stop);
@@ -325,6 +325,7 @@ enum enetc_active_offloads {

enum enetc_flags_bit {
ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
ENETC_TX_DOWN,
};

/* interrupt coalescing modes */
@@ -2411,7 +2411,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
if (!(cfg & BIT_ULL(12)))
continue;
bmap |= (1 << i);
bmap |= BIT_ULL(i);
cfg &= ~BIT_ULL(12);
rvu_write64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
@@ -2432,7 +2432,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,

/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
if (!(bmap & (1 << i)))
if (!(bmap & BIT_ULL(i)))
continue;
cfg = rvu_read64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
@@ -1171,7 +1171,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;

for (i = 0; i < cnt; i++) {
for (i = 0; i < len; i++) {
struct mtk_tx_dma_v2 *txd;

txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
@@ -1765,6 +1765,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
}
}

#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)

static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
@@ -1776,7 +1780,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
/* wait for pending handlers to complete */
mlx5_eq_synchronize_cmd_irq(dev);
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
if (!vector)
goto no_trig;

@@ -2361,7 +2365,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)

cmd->state = MLX5_CMDIF_STATE_DOWN;
cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
cmd->vars.bitmask = MLX5_CMD_MASK;

sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
@@ -6509,7 +6509,9 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev, false);
priv->profile->cleanup(priv);
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
@@ -1061,6 +1061,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
struct mlx5_eq_comp *eq;
int ret = 0;

if (vecidx >= table->max_comp_eqs) {
mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
vecidx, table->max_comp_eqs);
return -EINVAL;
}

mutex_lock(&table->comp_lock);
eq = xa_load(&table->comp_eqs, vecidx);
if (eq) {
@@ -691,7 +691,6 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
static int
hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
{
u32 num_of_rules;
int ret;

/* If the current matcher size is already at its max size, we can't
@@ -705,8 +704,7 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* Need to check again if we really need rehash.
* If the reason for rehash was size, but not any more - skip rehash.
*/
num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED);
if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
return 0;

/* Now we're done all the checking - do the rehash:
@@ -46,6 +46,7 @@ struct mlx5hws_context {
struct mlx5hws_send_engine *send_queue;
size_t queues;
struct mutex *bwc_send_queue_locks; /* protect BWC queues */
struct lock_class_key *bwc_lock_class_keys;
struct list_head tbl_list;
struct mlx5hws_context_debug_info debug_info;
struct xarray peer_ctx_xa;
@@ -1925,7 +1925,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
if (ret) {
mlx5hws_err(ctx, "Failed to convert items to header layout\n");
goto free_fc;
goto free_match_hl;
}

/* Find the match definer layout for header layout match union */
@@ -1946,7 +1946,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,

free_fc:
kfree(mt->fc);

free_match_hl:
kfree(match_hl);
return ret;
}
@@ -941,14 +941,18 @@ static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)

static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
{
int bwc_queues = ctx->queues - 1;
int bwc_queues = mlx5hws_bwc_queues(ctx);
int i;

if (!mlx5hws_context_bwc_supported(ctx))
return;

for (i = 0; i < bwc_queues; i++)
for (i = 0; i < bwc_queues; i++) {
mutex_destroy(&ctx->bwc_send_queue_locks[i]);
lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
}

kfree(ctx->bwc_lock_class_keys);
kfree(ctx->bwc_send_queue_locks);
}

@@ -977,10 +981,22 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
if (!ctx->bwc_send_queue_locks)
return -ENOMEM;

for (i = 0; i < bwc_queues; i++)
ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
sizeof(*ctx->bwc_lock_class_keys),
GFP_KERNEL);
if (!ctx->bwc_lock_class_keys)
goto err_lock_class_keys;

for (i = 0; i < bwc_queues; i++) {
mutex_init(&ctx->bwc_send_queue_locks[i]);
lockdep_register_key(ctx->bwc_lock_class_keys + i);
}

return 0;

err_lock_class_keys:
kfree(ctx->bwc_send_queue_locks);
return -ENOMEM;
}

int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
@@ -401,28 +401,21 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci,
u32 nano_seconds = 0;
u32 seconds = 0;

if (ts) {
if (ts->tv_sec > 0xFFFFFFFFLL ||
ts->tv_sec < 0) {
netif_warn(adapter, drv, adapter->netdev,
"ts->tv_sec out of range, %lld\n",
ts->tv_sec);
return -ERANGE;
}
if (ts->tv_nsec >= 1000000000L ||
ts->tv_nsec < 0) {
netif_warn(adapter, drv, adapter->netdev,
"ts->tv_nsec out of range, %ld\n",
ts->tv_nsec);
return -ERANGE;
}
seconds = ts->tv_sec;
nano_seconds = ts->tv_nsec;
lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);
} else {
netif_warn(adapter, drv, adapter->netdev, "ts == NULL\n");
return -EINVAL;
if (ts->tv_sec > 0xFFFFFFFFLL) {
netif_warn(adapter, drv, adapter->netdev,
"ts->tv_sec out of range, %lld\n",
ts->tv_sec);
return -ERANGE;
}
if (ts->tv_nsec < 0) {
netif_warn(adapter, drv, adapter->netdev,
"ts->tv_nsec out of range, %ld\n",
ts->tv_nsec);
return -ERANGE;
}
seconds = ts->tv_sec;
nano_seconds = ts->tv_nsec;
lan743x_ptp_clock_set(adapter, seconds, nano_seconds, 0);

return 0;
}
@@ -31,10 +31,10 @@ static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx)
/* Add port to mirror (only front ports) */
static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
{
u32 val, reg = portno;
u64 reg = portno;
u32 val;

reg = portno / BITS_PER_BYTE;
val = BIT(portno % BITS_PER_BYTE);
val = BIT(do_div(reg, 32));

if (reg == 0)
return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
@@ -45,10 +45,10 @@ static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
/* Delete port from mirror (only front ports) */
static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno)
{
u32 val, reg = portno;
u64 reg = portno;
u32 val;

reg = portno / BITS_PER_BYTE;
val = BIT(portno % BITS_PER_BYTE);
val = BIT(do_div(reg, 32));

if (reg == 0)
return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
@@ -1444,6 +1444,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)

ret = vcap_del_rule(&test_vctrl, &test_netdev, id);
KUNIT_EXPECT_EQ(test, 0, ret);

vcap_free_rule(rule);
}

static void vcap_api_set_rule_counter_test(struct kunit *test)
@@ -1745,20 +1745,19 @@ static int ravb_get_ts_info(struct net_device *ndev,
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;

info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
if (hw_info->gptp || hw_info->ccc_gac)
if (hw_info->gptp || hw_info->ccc_gac) {
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
info->phc_index = ptp_clock_index(priv->ptp.clock);
else
info->phc_index = 0;
}

return 0;
}
@@ -1057,6 +1057,7 @@ static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb->len >= TX_DS) {
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
dev_kfree_skb_any(skb);
goto out;
}
@@ -127,10 +127,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

usleep_range(10, 20); /* 500ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
@@ -143,22 +145,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
return err;
}

usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

usleep_range(10, 20); /* 50ns min delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

msleep(30); /* 30ms delay needed as per HW design */
value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);

err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
500, 500 * 2000);
@@ -1051,6 +1051,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -1071,6 +1072,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ndev->stats.tx_dropped++;
axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
true, NULL, 0);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -154,19 +154,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
return sa;
}

static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
struct macsec_rx_sa *sa = NULL;
int an;

for (an = 0; an < MACSEC_NUM_AN; an++) {
sa = macsec_rxsa_get(rx_sc->sa[an]);
if (sa)
break;
}
return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -1208,15 +1195,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
/* If validateFrames is Strict or the C bit in the
* SecTAG is set, discard
*/
struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
DEV_STATS_INC(secy->netdev, rx_errors);
if (active_rx_sa)
this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
}

@@ -1226,8 +1210,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsUnusedSA++;
u64_stats_update_end(&rxsc_stats->syncp);
if (active_rx_sa)
this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
goto deliver;
}
@@ -337,6 +337,7 @@ static const struct of_device_id unimac_mdio_ids[] = {
{ .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,bcm6846-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
{ .compatible = "brcm,genet-mdio-v3", },
@@ -836,7 +836,8 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
nsim_dev = nsim_trap_data->nsim_dev;

if (!devl_trylock(priv_to_devlink(nsim_dev))) {
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
queue_delayed_work(system_unbound_wq,
&nsim_dev->trap_data->trap_report_dw, 1);
return;
}

@@ -848,11 +849,12 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
continue;

nsim_dev_trap_report(nsim_dev_port);
cond_resched();
}
devl_unlock(priv_to_devlink(nsim_dev));

schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
queue_delayed_work(system_unbound_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
}

static int nsim_dev_traps_init(struct devlink *devlink)
@@ -907,8 +909,9 @@ static int nsim_dev_traps_init(struct devlink *devlink)

INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
queue_delayed_work(system_unbound_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));

return 0;
@@ -1870,6 +1870,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
* may trigger an error resubmitting itself and, worse,
* schedule a timer. So we kill it all just in case.
*/
usbnet_mark_going_away(dev);
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
free_netdev(net);
@@ -148,7 +148,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
tbi->dma_addr = page_pool_get_dma_addr(page) +
VMXNET3_XDP_HEADROOM;
(xdpf->data - (void *)xdpf);
dma_sync_single_for_device(&adapter->pdev->dev,
tbi->dma_addr, buf_size,
DMA_TO_DEVICE);
@@ -10,6 +10,19 @@
#include <kunit/test.h>
#include <kunit/resource.h>

#include "of_private.h"

/**
* of_root_kunit_skip() - Skip test if the root node isn't populated
* @test: test to skip if the root node isn't populated
*/
void of_root_kunit_skip(struct kunit *test)
{
if (IS_ENABLED(CONFIG_ARM64) && IS_ENABLED(CONFIG_ACPI) && !of_root)
kunit_skip(test, "arm64+acpi doesn't populate a root node");
}
EXPORT_SYMBOL_GPL(of_root_kunit_skip);

#if defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE)

static void of_overlay_fdt_apply_kunit_exit(void *ovcs_id)
@@ -36,6 +49,8 @@ int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
int ret;
int *copy_id;

of_root_kunit_skip(test);

copy_id = kunit_kmalloc(test, sizeof(*copy_id), GFP_KERNEL);
if (!copy_id)
return -ENOMEM;
@@ -42,6 +42,9 @@ extern raw_spinlock_t devtree_lock;
extern struct list_head aliases_lookup;
extern struct kset *of_kset;

struct kunit;
extern void of_root_kunit_skip(struct kunit *test);

#if defined(CONFIG_OF_DYNAMIC)
extern int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *old_prop);
@@ -7,6 +7,8 @@

#include <kunit/test.h>

#include "of_private.h"

/*
* Test that the root node "/" can be found by path.
*/
@@ -36,6 +38,7 @@ static struct kunit_case of_dtb_test_cases[] = {

static int of_dtb_test_init(struct kunit *test)
{
of_root_kunit_skip(test);
if (!IS_ENABLED(CONFIG_OF_EARLY_FLATTREE))
kunit_skip(test, "requires CONFIG_OF_EARLY_FLATTREE");

@@ -11,6 +11,8 @@
#include <kunit/of.h>
#include <kunit/test.h>

#include "of_private.h"

static const char * const kunit_node_name = "kunit-test";
static const char * const kunit_compatible = "test,empty";

@@ -62,6 +64,7 @@ static void of_overlay_apply_kunit_cleanup(struct kunit *test)
struct device *dev;
struct device_node *np;

of_root_kunit_skip(test);
if (!IS_ENABLED(CONFIG_OF_EARLY_FLATTREE))
kunit_skip(test, "requires CONFIG_OF_EARLY_FLATTREE for root node");

@@ -73,7 +76,7 @@ static void of_overlay_apply_kunit_cleanup(struct kunit *test)

np = of_find_node_by_name(NULL, kunit_node_name);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
of_node_put_kunit(test, np);
of_node_put_kunit(&fake, np);

pdev = of_find_device_by_node(np);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);