mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
pci-v6.12-changes
-----BEGIN PGP SIGNATURE-----

iQJIBAABCgAyFiEEgMe7l+5h9hnxdsnuWYigwDrT+vwFAmbseugUHGJoZWxnYWFz
QGdvb2dsZS5jb20ACgkQWYigwDrT+vxdwxAAvdvDyTuiPo2R8pQtvKg4YL2IUnK5
UR28mBxZDK5DFhLtD/QzmVVG/eaLY6bJHthHgJgTApzekkqU0h9dcRI0eegXrvcz
I3HRsZK2yatUky9l8O148OLzF897r7vXL3QtGe6qjKU+9D83IEeooLKgBca+GoBC
bRLvG/fYRzdjOe8UHFqCoeMIg3IOY7CNifvFOihAGpJpxfZQktj6hSKu6q7BL1Rx
NRgYlxh0eLcb7vAJqz6RZpQ8PRCwhAjlDuu0BOkES8/6EwisD1xUh3qdDxfVgNA6
FpcAb/53yr46cs4tM9ZTwluka86AskuXj3jwSKf7nE3zqr4nM9OD3sGOSYzK8UdE
EDBKj+9iEpYRC6rJMk5gNH2AZkR1OEpNUisR6+kEn81A9yNNoTmkHdHUOWo8TuxD
btc0sTM+eWApvTiZwgL4VjMZulQllV51K8tcfvODRhlMkbOPNWGWdmpWqEbUS2HU
i7+zzQC3DC5iPlAKgRSeYB0aad6la6brqPW16sGhGovNhgwbzakDLCUJJGn/LNuO
wd0UNpJTnHlfChbvNh2bBxiMOo0cab1tJ5Jp97STQYhLg2nW93s/dAfdpSAsYO4S
5YzjSADWeyeuDsHE1RdUdDvYAPMb1VZBUd2OSHis5zw7kmh25c9KYXEkDJ25q/ju
sVXK4oMNW/Gnd5M=
=L3s9
-----END PGP SIGNATURE-----

Merge tag 'pci-v6.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci

Pull pci updates from Bjorn Helgaas:

 "Enumeration:

   - Wait for device readiness after reset by polling Vendor ID and looking for Configuration RRS instead of polling the Command register and looking for non-error completions, to avoid hardware retries done for RRS on non-Vendor ID reads (Bjorn Helgaas)
   - Rename CRS Completion Status to RRS ('Request Retry Status') to match PCIe r6.0 spec usage (Bjorn Helgaas)
   - Clear LBMS bit after a manual link retrain so we don't try to retrain a link when there's no downstream device anymore (Maciej W. Rozycki)
   - Revert to the original link speed after retraining fails instead of leaving it restricted to 2.5GT/s, so a future device has a chance to use higher speeds (Maciej W. Rozycki)
   - Wait for each level of downstream bus, not just the first, to become accessible before restoring devices on that bus (Ilpo Järvinen)
   - Add ARCH_PCI_DEV_GROUPS so s390 can add its own attribute_groups without having to stomp on the core's pdev->dev.groups (Lukas Wunner)

  Driver binding:

   - Export pcim_request_region(), a managed counterpart of pci_request_region(), for use by drivers (Philipp Stanner)
   - Export pcim_iomap_region() and deprecate pcim_iomap_regions() (Philipp Stanner)
   - Request the PCI BAR used by xboxvideo (Philipp Stanner)
   - Request and map drm/ast BARs with pcim_iomap_region() (Philipp Stanner)

  MSI:

   - Add MSI_FLAG_NO_AFFINITY flag for devices that mux MSIs onto a single IRQ line and cannot set the affinity of each MSI to a specific CPU core (Marek Vasut)
   - Use MSI_FLAG_NO_AFFINITY and remove unnecessary .irq_set_affinity() implementations in aardvark, altera, brcmstb, dwc, mediatek-gen3, mediatek, mobiveil, plda, rcar, tegra, vmd, xilinx-nwl, xilinx-xdma, and xilinx drivers to avoid 'IRQ: set affinity failed' warnings (Marek Vasut)

  Power management:

   - Add pwrctl support for ATH11K inside the WCN6855 package (Konrad Dybcio)

  PCI device hotplug:

   - Remove unnecessary hpc_ops struct from shpchp (ngn)
   - Check for PCI_POSSIBLE_ERROR(), not 0xffffffff, in cpqphp (weiyufeng)

  Virtualization:

   - Mark Creative Labs EMU20k2 INTx masking as broken (Alex Williamson)
   - Add an ACS quirk for Qualcomm SA8775P, which doesn't advertise ACS but does provide ACS-like features (Subramanian Ananthanarayanan)

  IOMMU:

   - Add function 0 DMA alias quirk for Glenfly Arise audio function, which uses the function 0 Requester ID (WangYuli)

  NPEM:

   - Add Native PCIe Enclosure Management (NPEM) support for sysfs control of NVMe RAID storage indicators (ok/fail/locate/rebuild/etc) (Mariusz Tkaczyk)
   - Add support for the ACPI _DSM PCIe SSD status LED management, which is functionally similar to NPEM but mediated by platform firmware (Mariusz Tkaczyk)

  Device trees:

   - Drop minItems and maxItems from ranges in PCI generic host binding since host bridges may have several MMIO and I/O port apertures (Frank Li)
   - Add kirin, rcar-gen2, uniphier DT binding top-level constraints for clocks (Krzysztof Kozlowski)

  Altera PCIe controller driver:

   - Convert altera DT bindings from text to YAML (Matthew Gerlach)
   - Replace TLP_REQ_ID() with macro PCI_DEVID(), which does the same thing and is what other drivers use (Jinjie Ruan)

  Broadcom STB PCIe controller driver:

   - Add DT binding maxItems for reset controllers (Jim Quinlan)
   - Use the 'bridge' reset method if described in the DT (Jim Quinlan)
   - Use the 'swinit' reset method if described in the DT (Jim Quinlan)
   - Add 'has_phy' so the existence of a 'rescal' reset controller doesn't imply software control of it (Jim Quinlan)
   - Add support for many inbound DMA windows (Jim Quinlan)
   - Rename SoC 'type' to 'soc_base' to express the fact that SoCs come in families of multiple similar devices (Jim Quinlan)
   - Add Broadcom 7712 DT description and driver support (Jim Quinlan)
   - Sort enums, pcie_offsets[], pcie_cfg_data, .compatible strings for maintainability (Bjorn Helgaas)

  Freescale i.MX6 PCIe controller driver:

   - Add imx6q-pcie 'dbi2' and 'atu' reg-names for i.MX8M Endpoints (Richard Zhu)
   - Fix a code restructuring error that caused i.MX8MM and i.MX8MP Endpoints to fail to establish link (Richard Zhu)
   - Fix i.MX8MP Endpoint occasional failure to trigger MSI by enforcing outbound alignment requirement (Richard Zhu)
   - Call phy_power_off() in the .probe() error path (Frank Li)
   - Rename internal names from imx6_* to imx_* since i.MX7/8/9 are also supported (Frank Li)
   - Manage Refclk by using SoC-specific callbacks instead of switch statements (Frank Li)
   - Manage core reset by using SoC-specific callbacks instead of switch statements (Frank Li)
   - Expand comments for erratum ERR010728 workaround (Frank Li)
   - Use generic PHY APIs to configure mode, speed, and submode, which is harmless for devices that implement their own internal PHY management and don't set the generic imx_pcie->phy (Frank Li)
   - Add i.MX8Q (i.MX8QM, i.MX8QXP, and i.MX8DXL) DT binding and driver Root Complex support (Richard Zhu)

  Freescale Layerscape PCIe controller driver:

   - Replace layerscape-pcie DT binding compatible fsl,lx2160a-pcie with fsl,lx2160ar2-pcie (Frank Li)
   - Add layerscape-pcie DT binding deprecated 'num-viewport' property to address a DT checker warning (Frank Li)
   - Change layerscape-pcie DT binding 'fsl,pcie-scfg' to phandle-array (Frank Li)

  Loongson PCIe controller driver:

   - Increase max PCI hosts to 8 for Loongson-3C6000 and newer chipsets (Huacai Chen)

  Marvell Aardvark PCIe controller driver:

   - Fix issue with emulating Configuration RRS for two-byte reads of Vendor ID; previously it only worked for four-byte reads (Bjorn Helgaas)

  MediaTek PCIe Gen3 controller driver:

   - Add per-SoC struct mtk_gen3_pcie_pdata to support multiple SoC types (Lorenzo Bianconi)
   - Use reset_bulk APIs to manage PHY reset lines (Lorenzo Bianconi)
   - Add DT and driver support for Airoha EN7581 PCIe controller (Lorenzo Bianconi)

  Qualcomm PCIe controller driver:

   - Update qcom,pcie-sc7280 DT binding with eight interrupts (Rayyan Ansari)
   - Add back DT 'vddpe-3v3-supply', which was incorrectly removed earlier (Johan Hovold)
   - Drop endpoint redundant masking of global IRQ events (Manivannan Sadhasivam)
   - Clarify unknown global IRQ message and only log it once to avoid a flood (Manivannan Sadhasivam)
   - Add 'linux,pci-domain' property to endpoint DT binding (Manivannan Sadhasivam)
   - Assign PCI domain number for endpoint controllers (Manivannan Sadhasivam)
   - Add 'qcom_pcie_ep' and the PCI domain number to IRQ names for endpoint controller (Manivannan Sadhasivam)
   - Add global SPI interrupt for PCIe link events to DT binding (Manivannan Sadhasivam)
   - Add global RC interrupt handler to handle 'Link up' events and automatically enumerate hot-added devices (Manivannan Sadhasivam)
   - Avoid mirroring of DBI and iATU register space so it doesn't overlap BAR MMIO space (Prudhvi Yarlagadda)
   - Enable controller resources like PHY only after PERST# is deasserted to partially avoid the problem that the endpoint SoC crashes when accessing things when Refclk is absent (Manivannan Sadhasivam)
   - Add 16.0 GT/s equalization and RX lane margining settings (Shashank Babu Chinta Venkata)
   - Pass domain number to pci_bus_release_domain_nr() explicitly to avoid a NULL pointer dereference (Manivannan Sadhasivam)

  Renesas R-Car PCIe controller driver:

   - Make the read-only const array 'check_addr' static (Colin Ian King)
   - Add R-Car V4M (R8A779H0) PCIe host and endpoint to DT binding (Yoshihiro Shimoda)

  TI DRA7xx PCIe controller driver:

   - Request IRQF_ONESHOT for 'dra7xx-pcie-main' IRQ since the primary handler is NULL (Siddharth Vadapalli)
   - Handle IRQ request errors during root port and endpoint probe (Siddharth Vadapalli)

  TI J721E PCIe driver:

   - Add DT 'ti,syscon-acspcie-proxy-ctrl' and driver support to enable the ACSPCIE module to drive Refclk for the Endpoint (Siddharth Vadapalli)
   - Extract the cadence link setup from cdns_pcie_host_setup() so link setup can be done separately during resume (Thomas Richard)
   - Add T_PERST_CLK_US definition for the mandatory delay between Refclk becoming stable and PERST# being deasserted (Thomas Richard)
   - Add j721e suspend and resume support (Théo Lebrun)

  TI Keystone PCIe controller driver:

   - Fix NULL pointer checking when applying MRRS limitation quirk for AM65x SR 1.0 Errata #i2037 (Dan Carpenter)

  Xilinx NWL PCIe controller driver:

   - Fix off-by-one error in INTx IRQ handler that caused INTx interrupts to be lost or delivered as the wrong interrupt (Sean Anderson)
   - Rate-limit misc interrupt messages (Sean Anderson)
   - Turn off the clock on probe failure and device removal (Sean Anderson)
   - Add DT binding and driver support for enabling/disabling PHYs (Sean Anderson)
   - Add PCIe phy bindings for the ZCU102 (Sean Anderson)

  Xilinx XDMA PCIe controller driver:

   - Add support for Xilinx QDMA Soft IP PCIe Root Port Bridge to DT binding and xilinx-dma-pl driver (Thippeswamy Havalige)

  Miscellaneous:

   - Fix buffer overflow in kirin_pcie_parse_port() (Alexandra Diupina)
   - Fix minor kerneldoc issues and typos (Bjorn Helgaas)
   - Use PCI_DEVID() macro in aer_inject() instead of open-coding it (Jinjie Ruan)
   - Check pcie_find_root_port() return in x86 fixups to avoid NULL pointer dereferences (Samasth Norway Ananda)
   - Make pci_bus_type constant (Kunwu Chan)
   - Remove unused declarations of __pci_pme_wakeup() and pci_vpd_release() (Yue Haibing)
   - Remove any leftover .*.cmd files with make clean (zhang jiao)
   - Remove unused BILLION macro (zhang jiao)"

* tag 'pci-v6.12-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci: (132 commits)
  PCI: Fix typos
  dt-bindings: PCI: qcom: Allow 'vddpe-3v3-supply' again
  tools: PCI: Remove unused BILLION macro
  tools: PCI: Remove .*.cmd files with make clean
  PCI: Pass domain number to pci_bus_release_domain_nr() explicitly
  PCI: dra7xx: Fix error handling when IRQ request fails in probe
  PCI: dra7xx: Fix threaded IRQ request for "dra7xx-pcie-main" IRQ
  PCI: qcom: Add RX lane margining settings for 16.0 GT/s
  PCI: qcom: Add equalization settings for 16.0 GT/s
  PCI: dwc: Always cache the maximum link speed value in dw_pcie::max_link_speed
  PCI: dwc: Rename 'dw_pcie::link_gen' to 'dw_pcie::max_link_speed'
  PCI: qcom-ep: Enable controller resources like PHY only after refclk is available
  PCI: Mark Creative Labs EMU20k2 INTx masking as broken
  dt-bindings: PCI: imx6q-pcie: Add reg-name "dbi2" and "atu" for i.MX8M PCIe Endpoint
  dt-bindings: PCI: altera: msi: Convert to YAML
  PCI: imx6: Add i.MX8Q PCIe Root Complex (RC) support
  PCI: Rename CRS Completion Status to RRS
  PCI: aardvark: Correct Configuration RRS checking
  PCI: Wait for device readiness with Configuration RRS
  PCI: brcmstb: Sort enums, pcie_offsets[], pcie_cfg_data, .compatible strings
  ...
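The managed-region helpers called out under "Driver binding" above follow the usual devres pattern: request plus ioremap in one call, released automatically when the driver unbinds. Below is a minimal sketch of how a driver might use them; the driver name "demo" and the device IDs are placeholders, not taken from any driver in this merge.

```c
/*
 * Illustrative sketch only: a managed-BAR probe skeleton using the
 * pcim_* helpers exported in this cycle. "demo" and the IDs are
 * placeholders, not a real driver from this merge.
 */
#include <linux/module.h>
#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);	/* undone automatically on unbind */
	if (ret)
		return ret;

	/* Request and ioremap BAR 0 in one managed step */
	regs = pcim_iomap_region(pdev, 0, "demo");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* ... program the device through 'regs' ... */
	return 0;
}

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_ids,
	.probe		= demo_probe,
};
module_pci_driver(demo_driver);

MODULE_DESCRIPTION("Illustrative pcim_iomap_region() usage sketch");
MODULE_LICENSE("GPL");
```

The drm/ast and vboxvideo hunks later in this diff show the same pattern applied to existing drivers.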
commit 3a37872316
@ -500,3 +500,75 @@ Description:
		console drivers from the device. Raw users of pci-sysfs
		resourceN attributes must be terminated prior to resizing.
		Success of the resizing operation is not guaranteed.

What:		/sys/bus/pci/devices/.../leds/*:enclosure:*/brightness
What:		/sys/class/leds/*:enclosure:*/brightness
Date:		August 2024
KernelVersion:	6.12
Description:
		LED indications on PCIe storage enclosures which are controlled
		through the NPEM interface (Native PCIe Enclosure Management,
		PCIe r6.1 sec 6.28) are accessible as led class devices, both
		below /sys/class/leds and below NPEM-capable PCI devices.

		Although these led class devices could be manipulated manually,
		in practice they are typically manipulated automatically by an
		application such as ledmon(8).

		The name of a led class device is as follows:
		<bdf>:enclosure:<indication>
		where:

		- <bdf> is the domain, bus, device and function number
		  (e.g. 10000:02:05.0)
		- <indication> is a short description of the LED indication

		Valid indications per PCIe r6.1 table 6-27 are:

		- ok (drive is functioning normally)
		- locate (drive is being identified by an admin)
		- fail (drive is not functioning properly)
		- rebuild (drive is part of an array that is rebuilding)
		- pfa (drive is predicted to fail soon)
		- hotspare (drive is marked to be used as a replacement)
		- ica (drive is part of an array that is degraded)
		- ifa (drive is part of an array that is failed)
		- idt (drive is not the right type for the connector)
		- disabled (drive is disabled, removal is safe)
		- specific0 to specific7 (enclosure-specific indications)

		Broadly, the indications fall into one of these categories:

		- to signify drive state (ok, locate, fail, idt, disabled)
		- to signify drive role or state in a software RAID array
		  (rebuild, pfa, hotspare, ica, ifa)
		- to signify any other role or state (specific0 to specific7)

		Mandatory indications per PCIe r6.1 sec 7.9.19.2 comprise:
		ok, locate, fail, rebuild. All others are optional.
		A led class device is only visible if the corresponding
		indication is supported by the device.

		To manipulate the indications, write 0 (LED_OFF) or 1 (LED_ON)
		to the "brightness" file. Note that manipulating an indication
		may implicitly manipulate other indications at the vendor's
		discretion. E.g. when the user lights up the "ok" indication,
		the vendor may choose to automatically turn off the "fail"
		indication. The current state of an indication can be
		retrieved by reading its "brightness" file.

		The PCIe Base Specification allows vendors leeway to choose
		different colors or blinking patterns for the indications,
		but they typically follow the IBPI standard. E.g. the "locate"
		indication is usually presented as one or two LEDs blinking at
		4 Hz frequency:
		https://en.wikipedia.org/wiki/International_Blinking_Pattern_Interpretation

		PCI Firmware Specification r3.3 sec 4.7 defines a DSM interface
		to facilitate shared access by operating system and platform
		firmware to a device's NPEM registers. The kernel will use
		this DSM interface where available, instead of accessing NPEM
		registers directly. The DSM interface does not support the
		enclosure-specific indications "specific0" to "specific7",
		hence the corresponding led class devices are unavailable if
		the DSM interface is used.
@ -1,27 +0,0 @@
|
||||
* Altera PCIe MSI controller
|
||||
|
||||
Required properties:
|
||||
- compatible: should contain "altr,msi-1.0"
|
||||
- reg: specifies the physical base address of the controller and
|
||||
the length of the memory mapped region.
|
||||
- reg-names: must include the following entries:
|
||||
"csr": CSR registers
|
||||
"vector_slave": vectors slave port region
|
||||
- interrupts: specifies the interrupt source of the parent interrupt
|
||||
controller. The format of the interrupt specifier depends on the
|
||||
parent interrupt controller.
|
||||
- num-vectors: number of vectors, range 1 to 32.
|
||||
- msi-controller: indicates that this is MSI controller node
|
||||
|
||||
|
||||
Example
|
||||
msi0: msi@0xFF200000 {
|
||||
compatible = "altr,msi-1.0";
|
||||
reg = <0xFF200000 0x00000010
|
||||
0xFF200010 0x00000080>;
|
||||
reg-names = "csr", "vector_slave";
|
||||
interrupt-parent = <&hps_0_arm_gic_0>;
|
||||
interrupts = <0 42 4>;
|
||||
msi-controller;
|
||||
num-vectors = <32>;
|
||||
};
|
@ -1,50 +0,0 @@
|
||||
* Altera PCIe controller
|
||||
|
||||
Required properties:
|
||||
- compatible : should contain "altr,pcie-root-port-1.0" or "altr,pcie-root-port-2.0"
|
||||
- reg: a list of physical base address and length for TXS and CRA.
|
||||
For "altr,pcie-root-port-2.0", additional HIP base address and length.
|
||||
- reg-names: must include the following entries:
|
||||
"Txs": TX slave port region
|
||||
"Cra": Control register access region
|
||||
"Hip": Hard IP region (if "altr,pcie-root-port-2.0")
|
||||
- interrupts: specifies the interrupt source of the parent interrupt
|
||||
controller. The format of the interrupt specifier depends
|
||||
on the parent interrupt controller.
|
||||
- device_type: must be "pci"
|
||||
- #address-cells: set to <3>
|
||||
- #size-cells: set to <2>
|
||||
- #interrupt-cells: set to <1>
|
||||
- ranges: describes the translation of addresses for root ports and
|
||||
standard PCI regions.
|
||||
- interrupt-map-mask and interrupt-map: standard PCI properties to define the
|
||||
mapping of the PCIe interface to interrupt numbers.
|
||||
|
||||
Optional properties:
|
||||
- msi-parent: Link to the hardware entity that serves as the MSI controller
|
||||
for this PCIe controller.
|
||||
- bus-range: PCI bus numbers covered
|
||||
|
||||
Example
|
||||
pcie_0: pcie@c00000000 {
|
||||
compatible = "altr,pcie-root-port-1.0";
|
||||
reg = <0xc0000000 0x20000000>,
|
||||
<0xff220000 0x00004000>;
|
||||
reg-names = "Txs", "Cra";
|
||||
interrupt-parent = <&hps_0_arm_gic_0>;
|
||||
interrupts = <0 40 4>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
bus-range = <0x0 0xFF>;
|
||||
device_type = "pci";
|
||||
msi-parent = <&msi_to_gic_gen_0>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
interrupt-map-mask = <0 0 0 7>;
|
||||
interrupt-map = <0 0 0 1 &pcie_0 1>,
|
||||
<0 0 0 2 &pcie_0 2>,
|
||||
<0 0 0 3 &pcie_0 3>,
|
||||
<0 0 0 4 &pcie_0 4>;
|
||||
ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000
|
||||
0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>;
|
||||
};
|
@ -0,0 +1,65 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright (C) 2015, 2024, Intel Corporation
%YAML 1.2
---
$id: http://devicetree.org/schemas/altr,msi-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Altera PCIe MSI controller

maintainers:
  - Matthew Gerlach <matthew.gerlach@linux.intel.com>

properties:
  compatible:
    enum:
      - altr,msi-1.0

  reg:
    items:
      - description: CSR registers
      - description: Vectors slave port region

  reg-names:
    items:
      - const: csr
      - const: vector_slave

  interrupts:
    maxItems: 1

  msi-controller: true

  num-vectors:
    description: number of vectors
    $ref: /schemas/types.yaml#/definitions/uint32
    minimum: 1
    maximum: 32

required:
  - compatible
  - reg
  - reg-names
  - interrupts
  - msi-controller
  - num-vectors

allOf:
  - $ref: /schemas/interrupt-controller/msi-controller.yaml#

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/interrupt-controller/irq.h>
    msi@ff200000 {
        compatible = "altr,msi-1.0";
        reg = <0xff200000 0x00000010>,
              <0xff200010 0x00000080>;
        reg-names = "csr", "vector_slave";
        interrupt-parent = <&hps_0_arm_gic_0>;
        interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
        msi-controller;
        num-vectors = <32>;
    };
114
Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml
Normal file
@ -0,0 +1,114 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
|
||||
# Copyright (C) 2015, 2019, 2024, Intel Corporation
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/altr,pcie-root-port.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Altera PCIe Root Port
|
||||
|
||||
maintainers:
|
||||
- Matthew Gerlach <matthew.gerlach@linux.intel.com>
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- altr,pcie-root-port-1.0
|
||||
- altr,pcie-root-port-2.0
|
||||
|
||||
reg:
|
||||
items:
|
||||
- description: TX slave port region
|
||||
- description: Control register access region
|
||||
- description: Hard IP region
|
||||
minItems: 2
|
||||
|
||||
reg-names:
|
||||
items:
|
||||
- const: Txs
|
||||
- const: Cra
|
||||
- const: Hip
|
||||
minItems: 2
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
interrupt-controller: true
|
||||
|
||||
interrupt-map-mask:
|
||||
items:
|
||||
- const: 0
|
||||
- const: 0
|
||||
- const: 0
|
||||
- const: 7
|
||||
|
||||
interrupt-map:
|
||||
maxItems: 4
|
||||
|
||||
"#interrupt-cells":
|
||||
const: 1
|
||||
|
||||
msi-parent: true
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- reg-names
|
||||
- interrupts
|
||||
- "#interrupt-cells"
|
||||
- interrupt-controller
|
||||
- interrupt-map
|
||||
- interrupt-map-mask
|
||||
|
||||
allOf:
|
||||
- $ref: /schemas/pci/pci-host-bridge.yaml#
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- altr,pcie-root-port-1.0
|
||||
then:
|
||||
properties:
|
||||
reg:
|
||||
maxItems: 2
|
||||
|
||||
reg-names:
|
||||
maxItems: 2
|
||||
|
||||
else:
|
||||
properties:
|
||||
reg:
|
||||
minItems: 3
|
||||
|
||||
reg-names:
|
||||
minItems: 3
|
||||
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/interrupt-controller/irq.h>
|
||||
pcie_0: pcie@c00000000 {
|
||||
compatible = "altr,pcie-root-port-1.0";
|
||||
reg = <0xc0000000 0x20000000>,
|
||||
<0xff220000 0x00004000>;
|
||||
reg-names = "Txs", "Cra";
|
||||
interrupt-parent = <&hps_0_arm_gic_0>;
|
||||
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
bus-range = <0x0 0xff>;
|
||||
device_type = "pci";
|
||||
msi-parent = <&msi_to_gic_gen_0>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
interrupt-map-mask = <0 0 0 7>;
|
||||
interrupt-map = <0 0 0 1 &pcie_0 0 0 0 1>,
|
||||
<0 0 0 2 &pcie_0 0 0 0 2>,
|
||||
<0 0 0 3 &pcie_0 0 0 0 3>,
|
||||
<0 0 0 4 &pcie_0 0 0 0 4>;
|
||||
ranges = <0x82000000 0x00000000 0x00000000 0xc0000000 0x00000000 0x10000000>,
|
||||
<0x82000000 0x00000000 0x10000000 0xd0000000 0x00000000 0x10000000>;
|
||||
};
|
@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
title: Brcmstb PCIe Host Controller
|
||||
|
||||
maintainers:
|
||||
- Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
|
||||
- Jim Quinlan <james.quinlan@broadcom.com>
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
@ -16,11 +16,12 @@ properties:
|
||||
- brcm,bcm2711-pcie # The Raspberry Pi 4
|
||||
- brcm,bcm4908-pcie
|
||||
- brcm,bcm7211-pcie # Broadcom STB version of RPi4
|
||||
- brcm,bcm7278-pcie # Broadcom 7278 Arm
|
||||
- brcm,bcm7216-pcie # Broadcom 7216 Arm
|
||||
- brcm,bcm7445-pcie # Broadcom 7445 Arm
|
||||
- brcm,bcm7278-pcie # Broadcom 7278 Arm
|
||||
- brcm,bcm7425-pcie # Broadcom 7425 MIPs
|
||||
- brcm,bcm7435-pcie # Broadcom 7435 MIPs
|
||||
- brcm,bcm7445-pcie # Broadcom 7445 Arm
|
||||
- brcm,bcm7712-pcie # Broadcom STB sibling of Rpi 5
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
@ -95,6 +96,14 @@ properties:
|
||||
minItems: 1
|
||||
maxItems: 3
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 3
|
||||
|
||||
reset-names:
|
||||
minItems: 1
|
||||
maxItems: 3
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
@ -118,8 +127,7 @@ allOf:
|
||||
then:
|
||||
properties:
|
||||
resets:
|
||||
items:
|
||||
- description: reset controller handling the PERST# signal
|
||||
maxItems: 1
|
||||
|
||||
reset-names:
|
||||
items:
|
||||
@ -136,8 +144,7 @@ allOf:
|
||||
then:
|
||||
properties:
|
||||
resets:
|
||||
items:
|
||||
- description: phandle pointing to the RESCAL reset controller
|
||||
maxItems: 1
|
||||
|
||||
reset-names:
|
||||
items:
|
||||
@ -147,6 +154,27 @@ allOf:
|
||||
- resets
|
||||
- reset-names
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
const: brcm,bcm7712-pcie
|
||||
then:
|
||||
properties:
|
||||
resets:
|
||||
minItems: 3
|
||||
maxItems: 3
|
||||
|
||||
reset-names:
|
||||
items:
|
||||
- const: rescal
|
||||
- const: bridge
|
||||
- const: swinit
|
||||
|
||||
required:
|
||||
- resets
|
||||
- reset-names
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
|
@ -65,12 +65,14 @@ allOf:
|
||||
then:
|
||||
properties:
|
||||
reg:
|
||||
minItems: 2
|
||||
maxItems: 2
|
||||
minItems: 4
|
||||
maxItems: 4
|
||||
reg-names:
|
||||
items:
|
||||
- const: dbi
|
||||
- const: addr_space
|
||||
- const: dbi2
|
||||
- const: atu
|
||||
|
||||
- if:
|
||||
properties:
|
||||
@ -129,8 +131,11 @@ examples:
|
||||
|
||||
pcie_ep: pcie-ep@33800000 {
|
||||
compatible = "fsl,imx8mp-pcie-ep";
|
||||
reg = <0x33800000 0x000400000>, <0x18000000 0x08000000>;
|
||||
reg-names = "dbi", "addr_space";
|
||||
reg = <0x33800000 0x100000>,
|
||||
<0x18000000 0x8000000>,
|
||||
<0x33900000 0x100000>,
|
||||
<0x33b00000 0x100000>;
|
||||
reg-names = "dbi", "addr_space", "dbi2", "atu";
|
||||
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
|
||||
<&clk IMX8MP_CLK_HSIO_AXI>,
|
||||
<&clk IMX8MP_CLK_PCIE_ROOT>;
|
||||
|
@ -30,6 +30,7 @@ properties:
|
||||
- fsl,imx8mm-pcie
|
||||
- fsl,imx8mp-pcie
|
||||
- fsl,imx95-pcie
|
||||
- fsl,imx8q-pcie
|
||||
|
||||
clocks:
|
||||
minItems: 3
|
||||
@ -184,6 +185,21 @@ allOf:
|
||||
- const: pcie_bus
|
||||
- const: pcie_aux
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- fsl,imx8q-pcie
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
maxItems: 3
|
||||
clock-names:
|
||||
items:
|
||||
- const: dbi
|
||||
- const: mstr
|
||||
- const: slv
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
|
@ -22,18 +22,20 @@ description:
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- fsl,ls1021a-pcie
|
||||
- fsl,ls2080a-pcie
|
||||
- fsl,ls2085a-pcie
|
||||
- fsl,ls2088a-pcie
|
||||
- fsl,ls1088a-pcie
|
||||
- fsl,ls1046a-pcie
|
||||
- fsl,ls1043a-pcie
|
||||
- fsl,ls1012a-pcie
|
||||
- fsl,ls1028a-pcie
|
||||
- fsl,lx2160a-pcie
|
||||
|
||||
oneOf:
|
||||
- enum:
|
||||
- fsl,ls1012a-pcie
|
||||
- fsl,ls1021a-pcie
|
||||
- fsl,ls1028a-pcie
|
||||
- fsl,ls1043a-pcie
|
||||
- fsl,ls1046a-pcie
|
||||
- fsl,ls1088a-pcie
|
||||
- fsl,ls2080a-pcie
|
||||
- fsl,ls2085a-pcie
|
||||
- fsl,ls2088a-pcie
|
||||
- items:
|
||||
- const: fsl,lx2160ar2-pcie
|
||||
- const: fsl,ls2088a-pcie
|
||||
reg:
|
||||
maxItems: 2
|
||||
|
||||
@ -43,10 +45,15 @@ properties:
|
||||
- const: config
|
||||
|
||||
fsl,pcie-scfg:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle
|
||||
$ref: /schemas/types.yaml#/definitions/phandle-array
|
||||
description: A phandle to the SCFG device node. The second entry is the
|
||||
physical PCIe controller index starting from '0'. This is used to get
|
||||
SCFG PEXN registers.
|
||||
items:
|
||||
items:
|
||||
- description: A phandle to the SCFG device node
|
||||
- description: PCIe controller index starting from '0'
|
||||
maxItems: 1
|
||||
|
||||
big-endian:
|
||||
$ref: /schemas/types.yaml#/definitions/flag
|
||||
@ -67,6 +74,14 @@ properties:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
num-viewport:
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
deprecated: true
|
||||
description:
|
||||
Number of outbound view ports configured in hardware. It's the same as
|
||||
the number of outbound AT windows.
|
||||
maximum: 256
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
|
@ -37,7 +37,8 @@ properties:
|
||||
minItems: 3
|
||||
maxItems: 4
|
||||
|
||||
clocks: true
|
||||
clocks:
|
||||
maxItems: 5
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
|
@ -102,8 +102,6 @@ properties:
|
||||
As described in IEEE Std 1275-1994, but must provide at least a
|
||||
definition of non-prefetchable memory. One or both of prefetchable Memory
|
||||
and IO Space may also be provided.
|
||||
minItems: 1
|
||||
maxItems: 3
|
||||
|
||||
dma-coherent: true
|
||||
iommu-map: true
|
||||
|
@ -53,6 +53,7 @@ properties:
|
||||
- mediatek,mt8195-pcie
|
||||
- const: mediatek,mt8192-pcie
|
||||
- const: mediatek,mt8192-pcie
|
||||
- const: airoha,en7581-pcie
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
@ -76,20 +77,20 @@ properties:
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
maxItems: 3
|
||||
|
||||
reset-names:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
maxItems: 3
|
||||
items:
|
||||
enum: [ phy, mac ]
|
||||
enum: [ phy, mac, phy-lane0, phy-lane1, phy-lane2 ]
|
||||
|
||||
clocks:
|
||||
minItems: 4
|
||||
minItems: 1
|
||||
maxItems: 6
|
||||
|
||||
clock-names:
|
||||
minItems: 4
|
||||
minItems: 1
|
||||
maxItems: 6
|
||||
|
||||
assigned-clocks:
|
||||
@ -147,6 +148,9 @@ allOf:
|
||||
const: mediatek,mt8192-pcie
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
minItems: 4
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: pl_250m
|
||||
@ -155,6 +159,15 @@ allOf:
|
||||
- const: tl_32k
|
||||
- const: peri_26m
|
||||
- const: top_133m
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
reset-names:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
@ -164,6 +177,9 @@ allOf:
|
||||
- mediatek,mt8195-pcie
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
minItems: 4
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: pl_250m
|
||||
@ -172,6 +188,15 @@ allOf:
|
||||
- const: tl_32k
|
||||
- const: peri_26m
|
||||
- const: peri_mem
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
reset-names:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
@ -180,6 +205,9 @@ allOf:
|
||||
- mediatek,mt7986-pcie
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
minItems: 4
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: pl_250m
|
||||
@ -187,6 +215,36 @@ allOf:
|
||||
- const: peri_26m
|
||||
- const: top_133m
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
reset-names:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
- if:
|
||||
properties:
|
||||
compatible:
|
||||
const: airoha,en7581-pcie
|
||||
then:
|
||||
properties:
|
||||
clocks:
|
||||
maxItems: 1
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: sys-ck
|
||||
|
||||
resets:
|
||||
minItems: 3
|
||||
|
||||
reset-names:
|
||||
items:
|
||||
- const: phy-lane0
|
||||
- const: phy-lane1
|
||||
- const: phy-lane2
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
|
@ -10,7 +10,8 @@ description: |
|
||||
Common properties for PCI Endpoint Controller Nodes.
|
||||
|
||||
maintainers:
|
||||
- Kishon Vijay Abraham I <kishon@ti.com>
|
||||
- Kishon Vijay Abraham I <kishon@kernel.org>
|
||||
- Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
|
||||
|
||||
properties:
|
||||
$nodename:
|
||||
@ -41,6 +42,17 @@ properties:
|
||||
default: 1
|
||||
maximum: 16
|
||||
|
||||
linux,pci-domain:
|
||||
description:
|
||||
If present this property assigns a fixed PCI domain number to a PCI
|
||||
Endpoint Controller, otherwise an unstable (across boots) unique number
|
||||
will be assigned. It is required to either not set this property at all
|
||||
or set it for all PCI endpoint controllers in the system, otherwise
|
||||
potentially conflicting domain numbers may be assigned to endpoint
|
||||
controllers. The domain number for each endpoint controller in the system
|
||||
must be unique.
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
|
||||
required:
|
||||
- compatible
|
||||
|
||||
|
@ -21,11 +21,11 @@ properties:
|
||||
|
||||
interrupts:
|
||||
minItems: 1
|
||||
maxItems: 8
|
||||
maxItems: 9
|
||||
|
||||
interrupt-names:
|
||||
minItems: 1
|
||||
maxItems: 8
|
||||
maxItems: 9
|
||||
|
||||
iommu-map:
|
||||
minItems: 1
|
||||
@ -78,6 +78,9 @@ properties:
|
||||
description: GPIO controlled connection to WAKE# signal
|
||||
maxItems: 1
|
||||
|
||||
vddpe-3v3-supply:
|
||||
description: PCIe endpoint power supply
|
||||
|
||||
required:
|
||||
- reg
|
||||
- reg-names
|
||||
|
@ -280,4 +280,5 @@ examples:
|
||||
phy-names = "pciephy";
|
||||
max-link-speed = <3>;
|
||||
num-lanes = <2>;
|
||||
linux,pci-domain = <0>;
|
||||
};
|
||||
|
@ -53,11 +53,19 @@ properties:
|
||||
- const: aggre1 # Aggre NoC PCIe1 AXI clock
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
minItems: 8
|
||||
maxItems: 8
|
||||
|
||||
interrupt-names:
|
||||
items:
|
||||
- const: msi
|
||||
- const: msi0
|
||||
- const: msi1
|
||||
- const: msi2
|
||||
- const: msi3
|
||||
- const: msi4
|
||||
- const: msi5
|
||||
- const: msi6
|
||||
- const: msi7
|
||||
|
||||
resets:
|
||||
maxItems: 1
|
||||
@ -66,9 +74,6 @@ properties:
|
||||
items:
|
||||
- const: pci
|
||||
|
||||
vddpe-3v3-supply:
|
||||
description: PCIe endpoint power supply
|
||||
|
||||
allOf:
|
||||
- $ref: qcom,pcie-common.yaml#
|
||||
|
||||
@ -137,8 +142,16 @@ examples:
|
||||
|
||||
dma-coherent;
|
||||
|
||||
interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "msi";
|
||||
interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "msi0", "msi1", "msi2", "msi3",
|
||||
"msi4", "msi5", "msi6", "msi7";
|
||||
#interrupt-cells = <1>;
|
||||
interrupt-map-mask = <0 0 0 0x7>;
|
||||
interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
|
||||
|
@ -58,9 +58,6 @@ properties:
|
||||
items:
|
||||
- const: pci
|
||||
|
||||
vddpe-3v3-supply:
|
||||
description: A phandle to the PCIe endpoint power supply
|
||||
|
||||
required:
|
||||
- interconnects
|
||||
- interconnect-names
|
||||
|
@ -55,8 +55,8 @@ properties:
|
||||
- const: aggre1 # Aggre NoC PCIe1 AXI clock
|
||||
|
||||
interrupts:
|
||||
minItems: 8
|
||||
maxItems: 8
|
||||
minItems: 9
|
||||
maxItems: 9
|
||||
|
||||
interrupt-names:
|
||||
items:
|
||||
@ -68,6 +68,7 @@ properties:
|
||||
- const: msi5
|
||||
- const: msi6
|
||||
- const: msi7
|
||||
- const: global
|
||||
|
||||
operating-points-v2: true
|
||||
opp-table:
|
||||
@ -149,9 +150,10 @@ examples:
|
||||
<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
|
||||
<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "msi0", "msi1", "msi2", "msi3",
|
||||
"msi4", "msi5", "msi6", "msi7";
|
||||
"msi4", "msi5", "msi6", "msi7", "global";
|
||||
#interrupt-cells = <1>;
|
||||
interrupt-map-mask = <0 0 0 0x7>;
|
||||
interrupt-map = <0 0 0 1 &intc 0 0 0 149 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
|
||||
|
@ -91,6 +91,9 @@ properties:
|
||||
vdda_refclk-supply:
|
||||
description: A phandle to the core analog power supply for IC which generates reference clock
|
||||
|
||||
vddpe-3v3-supply:
|
||||
description: A phandle to the PCIe endpoint power supply
|
||||
|
||||
phys:
|
||||
maxItems: 1
|
||||
|
||||
|
@ -19,6 +19,7 @@ properties:
|
||||
- enum:
|
||||
- renesas,r8a779f0-pcie-ep # R-Car S4-8
|
||||
- renesas,r8a779g0-pcie-ep # R-Car V4H
|
||||
- renesas,r8a779h0-pcie-ep # R-Car V4M
|
||||
- const: renesas,rcar-gen4-pcie-ep # R-Car Gen4
|
||||
|
||||
reg:
|
||||
|
@ -19,6 +19,7 @@ properties:
|
||||
- enum:
|
||||
- renesas,r8a779f0-pcie # R-Car S4-8
|
||||
- renesas,r8a779g0-pcie # R-Car V4H
|
||||
- renesas,r8a779h0-pcie # R-Car V4M
|
||||
- const: renesas,rcar-gen4-pcie # R-Car Gen4
|
||||
|
||||
reg:
|
||||
|
@ -42,9 +42,13 @@ properties:
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
clocks: true
|
||||
clocks:
|
||||
minItems: 1
|
||||
maxItems: 3
|
||||
|
||||
clock-names: true
|
||||
clock-names:
|
||||
minItems: 1
|
||||
maxItems: 3
|
||||
|
||||
resets:
|
||||
maxItems: 1
|
||||
|
@ -38,13 +38,17 @@ properties:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
clock-names: true
|
||||
clock-names:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
reset-names: true
|
||||
reset-names:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
num-ib-windows:
|
||||
const: 16
|
||||
|
@ -38,6 +38,16 @@ properties:
|
||||
- const: reg
|
||||
- const: cfg
|
||||
|
||||
ti,syscon-acspcie-proxy-ctrl:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle-array
|
||||
items:
|
||||
- items:
|
||||
- description: Phandle to the ACSPCIE Proxy Control Register
|
||||
- description: Bitmask corresponding to the PAD IO Buffer
|
||||
output enable fields (Active Low).
|
||||
description: Specifier for enabling the ACSPCIE PAD outputs to drive
|
||||
the reference clock to the Endpoint device.
|
||||
|
||||
ti,syscon-pcie-ctrl:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle-array
|
||||
items:
|
||||
|
@ -61,6 +61,11 @@ properties:
|
||||
interrupt-map:
|
||||
maxItems: 4
|
||||
|
||||
phys:
|
||||
minItems: 1
|
||||
maxItems: 4
|
||||
description: One phy per logical lane, in order
|
||||
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
|
||||
@ -110,6 +115,7 @@ examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/interrupt-controller/irq.h>
|
||||
#include <dt-bindings/phy/phy.h>
|
||||
#include <dt-bindings/power/xlnx-zynqmp-power.h>
|
||||
soc {
|
||||
#address-cells = <2>;
|
||||
@ -138,6 +144,7 @@ examples:
|
||||
<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
|
||||
<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
|
||||
msi-parent = <&nwl_pcie>;
|
||||
phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>;
|
||||
power-domains = <&zynqmp_firmware PD_PCIE>;
|
||||
iommus = <&smmu 0x4d0>;
|
||||
pcie_intc: legacy-interrupt-controller {
|
||||
|
@ -14,10 +14,21 @@ allOf:
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: xlnx,xdma-host-3.00
|
||||
enum:
|
||||
- xlnx,xdma-host-3.00
|
||||
- xlnx,qdma-host-3.00
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
items:
|
||||
- description: configuration region and XDMA bridge register.
|
||||
- description: QDMA bridge register.
|
||||
minItems: 1
|
||||
|
||||
reg-names:
|
||||
items:
|
||||
- const: cfg
|
||||
- const: breg
|
||||
minItems: 1
|
||||
|
||||
ranges:
|
||||
maxItems: 2
|
||||
@ -76,6 +87,27 @@ required:
|
||||
- "#interrupt-cells"
|
||||
- interrupt-controller
|
||||
|
||||
if:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- xlnx,qdma-host-3.00
|
||||
then:
|
||||
properties:
|
||||
reg:
|
||||
minItems: 2
|
||||
reg-names:
|
||||
minItems: 2
|
||||
required:
|
||||
- reg-names
|
||||
else:
|
||||
properties:
|
||||
reg:
|
||||
maxItems: 1
|
||||
reg-names:
|
||||
maxItems: 1
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
|
@ -2793,7 +2793,7 @@ F: drivers/iommu/msm*
|
||||
F: drivers/mfd/ssbi.c
|
||||
F: drivers/mmc/host/mmci_qcom*
|
||||
F: drivers/mmc/host/sdhci-msm.c
|
||||
F: drivers/pci/controller/dwc/pcie-qcom.c
|
||||
F: drivers/pci/controller/dwc/pcie-qcom*
|
||||
F: drivers/phy/qualcomm/
|
||||
F: drivers/power/*/msm*
|
||||
F: drivers/reset/reset-qcom-*
|
||||
@ -17544,7 +17544,7 @@ PCI DRIVER FOR ALTERA PCIE IP
|
||||
M: Joyce Ooi <joyce.ooi@intel.com>
|
||||
L: linux-pci@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/pci/altera-pcie.txt
|
||||
F: Documentation/devicetree/bindings/pci/altr,pcie-root-port.yaml
|
||||
F: drivers/pci/controller/pcie-altera.c
|
||||
|
||||
PCI DRIVER FOR APPLIEDMICRO XGENE
|
||||
@ -17776,7 +17776,7 @@ PCI MSI DRIVER FOR ALTERA MSI IP
|
||||
M: Joyce Ooi <joyce.ooi@intel.com>
|
||||
L: linux-pci@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt
|
||||
F: Documentation/devicetree/bindings/pci/altr,msi-controller.yaml
|
||||
F: drivers/pci/controller/pcie-altera-msi.c
|
||||
|
||||
PCI MSI DRIVER FOR APPLIEDMICRO XGENE
|
||||
@ -17929,6 +17929,7 @@ M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
|
||||
L: linux-pci@vger.kernel.org
|
||||
L: linux-arm-msm@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/pci/controller/dwc/pcie-qcom-common.c
|
||||
F: drivers/pci/controller/dwc/pcie-qcom.c
|
||||
|
||||
PCIE DRIVER FOR ROCKCHIP
|
||||
@ -17965,6 +17966,7 @@ L: linux-pci@vger.kernel.org
|
||||
L: linux-arm-msm@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
|
||||
F: drivers/pci/controller/dwc/pcie-qcom-common.c
|
||||
F: drivers/pci/controller/dwc/pcie-qcom-ep.c
|
||||
|
||||
PCMCIA SUBSYSTEM
|
||||
|
@ -941,6 +941,7 @@ conf-pull-none {
|
||||
|
||||
&pcie {
|
||||
status = "okay";
|
||||
phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>;
|
||||
};
|
||||
|
||||
&psgtr {
|
||||
|
@ -191,7 +191,14 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
|
||||
return (zdev->fh & (1UL << 31)) ? true : false;
|
||||
}
|
||||
|
||||
extern const struct attribute_group *zpci_attr_groups[];
|
||||
extern const struct attribute_group zpci_attr_group;
|
||||
extern const struct attribute_group pfip_attr_group;
|
||||
extern const struct attribute_group zpci_ident_attr_group;
|
||||
|
||||
#define ARCH_PCI_DEV_GROUPS &zpci_attr_group, \
|
||||
&pfip_attr_group, \
|
||||
&zpci_ident_attr_group,
|
||||
|
||||
extern unsigned int s390_pci_force_floating __initdata;
|
||||
extern unsigned int s390_pci_no_rid;
|
||||
|
||||
|
@ -3,7 +3,8 @@
|
||||
# Makefile for the s390 PCI subsystem.
|
||||
#
|
||||
|
||||
obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o pci_sysfs.o \
|
||||
obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_clp.o \
|
||||
pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
|
||||
pci_bus.o pci_kvm_hook.o
|
||||
obj-$(CONFIG_PCI_IOV) += pci_iov.o
|
||||
obj-$(CONFIG_SYSFS) += pci_sysfs.o
|
||||
|
@ -587,7 +587,6 @@ int pcibios_device_add(struct pci_dev *pdev)
|
||||
if (pdev->is_physfn)
|
||||
pdev->no_vf_scan = 1;
|
||||
|
||||
pdev->dev.groups = zpci_attr_groups;
|
||||
zpci_map_resources(pdev);
|
||||
|
||||
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
|
||||
|
@ -197,7 +197,7 @@ static struct attribute *zpci_ident_attrs[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group zpci_ident_attr_group = {
|
||||
const struct attribute_group zpci_ident_attr_group = {
|
||||
.attrs = zpci_ident_attrs,
|
||||
.is_visible = zpci_index_is_visible,
|
||||
};
|
||||
@ -223,7 +223,7 @@ static struct attribute *zpci_dev_attrs[] = {
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group zpci_attr_group = {
|
||||
const struct attribute_group zpci_attr_group = {
|
||||
.attrs = zpci_dev_attrs,
|
||||
.bin_attrs = zpci_bin_attrs,
|
||||
};
|
||||
@ -235,14 +235,8 @@ static struct attribute *pfip_attrs[] = {
|
||||
&dev_attr_segment3.attr,
|
||||
NULL,
|
||||
};
|
||||
static struct attribute_group pfip_attr_group = {
|
||||
|
||||
const struct attribute_group pfip_attr_group = {
|
||||
.name = "pfip",
|
||||
.attrs = pfip_attrs,
|
||||
};
|
||||
|
||||
const struct attribute_group *zpci_attr_groups[] = {
|
||||
&zpci_attr_group,
|
||||
&pfip_attr_group,
|
||||
&zpci_ident_attr_group,
|
||||
NULL,
|
||||
};
|
||||
|
@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev)
|
||||
return;
|
||||
|
||||
rp = pcie_find_root_port(dev);
|
||||
if (!rp->pm_cap)
|
||||
if (!rp || !rp->pm_cap)
|
||||
return;
|
||||
|
||||
rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
|
||||
@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev)
|
||||
u16 pmc;
|
||||
|
||||
rp = pcie_find_root_port(dev);
|
||||
if (!rp->pm_cap)
|
||||
if (!rp || !rp->pm_cap)
|
||||
return;
|
||||
|
||||
pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
|
||||
|
@ -17,6 +17,9 @@ obj-$(CONFIG_PINCTRL) += pinctrl/
|
||||
obj-$(CONFIG_GPIOLIB) += gpio/
|
||||
obj-y += pwm/
|
||||
|
||||
# LEDs must come before PCI, it is needed by NPEM driver
|
||||
obj-y += leds/
|
||||
|
||||
obj-y += pci/
|
||||
|
||||
obj-$(CONFIG_PARISC) += parisc/
|
||||
@ -130,7 +133,6 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
|
||||
obj-y += mmc/
|
||||
obj-y += ufs/
|
||||
obj-$(CONFIG_MEMSTICK) += memstick/
|
||||
obj-y += leds/
|
||||
obj-$(CONFIG_INFINIBAND) += infiniband/
|
||||
obj-y += firmware/
|
||||
obj-$(CONFIG_CRYPTO) += crypto/
|
||||
|
@ -181,6 +181,18 @@ static struct mcfg_fixup mcfg_quirks[] = {
|
||||
LOONGSON_ECAM_MCFG("LOONGSON", 0),
|
||||
LOONGSON_ECAM_MCFG("\0", 1),
|
||||
LOONGSON_ECAM_MCFG("LOONGSON", 1),
|
||||
LOONGSON_ECAM_MCFG("\0", 2),
|
||||
LOONGSON_ECAM_MCFG("LOONGSON", 2),
|
||||
LOONGSON_ECAM_MCFG("\0", 3),
|
||||
LOONGSON_ECAM_MCFG("LOONGSON", 3),
|
||||
LOONGSON_ECAM_MCFG("\0", 4),
|
||||
LOONGSON_ECAM_MCFG("LOONGSON", 4),
|
||||
LOONGSON_ECAM_MCFG("\0", 5),
|
||||
LOONGSON_ECAM_MCFG("LOONGSON", 5),
|
||||
LOONGSON_ECAM_MCFG("\0", 6),
|
||||
LOONGSON_ECAM_MCFG("LOONGSON", 6),
|
||||
LOONGSON_ECAM_MCFG("\0", 7),
|
||||
LOONGSON_ECAM_MCFG("LOONGSON", 7),
|
||||
#endif /* LOONGARCH */
|
||||
};
|
||||
|
||||
|
@ -334,7 +334,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
|
||||
}
|
||||
|
||||
/* If the root port is capable of returning Config Request
|
||||
* Retry Status (CRS) Completion Status to software then
|
||||
* Retry Status (RRS) Completion Status to software then
|
||||
* enable the feature.
|
||||
*/
|
||||
static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
|
||||
@ -348,10 +348,10 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
|
||||
NULL);
|
||||
root_cap = cap_ptr + PCI_EXP_RTCAP;
|
||||
bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
|
||||
if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
|
||||
/* Enable CRS software visibility */
|
||||
if (val16 & BCMA_CORE_PCI_RC_RRS_VISIBILITY) {
|
||||
/* Enable Configuration RRS Software Visibility */
|
||||
root_ctrl = cap_ptr + PCI_EXP_RTCTL;
|
||||
val16 = PCI_EXP_RTCTL_CRSSVE;
|
||||
val16 = PCI_EXP_RTCTL_RRS_SVE;
|
||||
bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
|
||||
sizeof(u16));
|
||||
|
||||
@ -360,7 +360,7 @@ static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
|
||||
* 100 ms wait time from the end of Reset. If the device is
|
||||
* not done with its internal initialization, it must at
|
||||
* least return a completion TLP, with a completion status
|
||||
* of "Configuration Request Retry Status (CRS)". The root
|
||||
* of "Configuration Request Retry Status (RRS)". The root
|
||||
* complex must complete the request to the host by returning
|
||||
* a read-data value of 0001h for the Vendor ID field and
|
||||
* all 1s for any additional bytes included in the request.
|
||||
|
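For illustration only, the readiness handshake the comment above describes can be mimicked from user space by polling a device's Vendor ID through its config file in sysfs until it stops returning the 0x0001 retry value synthesized for Configuration RRS. The BDF below is a made-up example; the kernel performs the real wait internally during enumeration and reset recovery.

```c
/* Sketch: poll a device's Vendor ID until it stops returning the 0x0001
 * "retry" value the root complex synthesizes for Configuration RRS.
 * The sysfs path is a hypothetical example. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *cfg = "/sys/bus/pci/devices/0000:01:00.0/config";
	uint16_t vendor = 0;
	int fd = open(cfg, O_RDONLY);

	if (fd < 0) {
		perror(cfg);
		return 1;
	}

	for (int tries = 0; tries < 100; tries++) {
		if (pread(fd, &vendor, sizeof(vendor), 0) != sizeof(vendor))
			break;
		if (vendor != 0x0001 && vendor != 0xffff)
			break;		/* device responded with a real ID */
		usleep(10000);		/* not ready yet; wait and retry */
	}

	printf("vendor id: 0x%04x\n", vendor);
	close(fd);
	return 0;
}
```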
@ -287,9 +287,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
regs = pcim_iomap(pdev, 1, 0);
|
||||
if (!regs)
|
||||
return -EIO;
|
||||
regs = pcim_iomap_region(pdev, 1, "ast");
|
||||
if (IS_ERR(regs))
|
||||
return PTR_ERR(regs);
|
||||
|
||||
if (pdev->revision >= 0x40) {
|
||||
/*
|
||||
@ -311,9 +311,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
if (len < AST_IO_MM_LENGTH)
|
||||
return -EIO;
|
||||
ioregs = pcim_iomap(pdev, 2, 0);
|
||||
if (!ioregs)
|
||||
return -EIO;
|
||||
ioregs = pcim_iomap_region(pdev, 2, "ast");
|
||||
if (IS_ERR(ioregs))
|
||||
return PTR_ERR(ioregs);
|
||||
} else {
|
||||
/*
|
||||
* Anything else is best effort.
|
||||
|
@ -114,6 +114,10 @@ int vbox_hw_init(struct vbox_private *vbox)
|
||||
|
||||
DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
|
||||
|
||||
ret = pcim_request_region(pdev, 0, "vboxvideo");
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Map guest-heap at end of vram */
|
||||
vbox->guest_heap = pcim_iomap_range(pdev, 0,
|
||||
GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE);
|
||||
|
@ -143,6 +143,15 @@ config PCI_IOV
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config PCI_NPEM
|
||||
bool "Native PCIe Enclosure Management"
|
||||
depends on LEDS_CLASS=y
|
||||
help
|
||||
Support for Native PCIe Enclosure Management. It allows managing LED
|
||||
indications in storage enclosures. Enclosure must support following
|
||||
indications: OK, Locate, Fail, Rebuild, other indications are
|
||||
optional.
|
||||
|
||||
config PCI_PRI
|
||||
bool "PCI PRI support"
|
||||
select PCI_ATS
|
||||
|
@ -35,6 +35,7 @@ obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
|
||||
obj-$(CONFIG_VGA_ARB) += vgaarb.o
|
||||
obj-$(CONFIG_PCI_DOE) += doe.o
|
||||
obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
|
||||
obj-$(CONFIG_PCI_NPEM) += npem.o
|
||||
|
||||
# Endpoint library must be initialized before its users
|
||||
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
|
||||
|
@ -488,8 +488,8 @@ void pci_restore_pasid_state(struct pci_dev *pdev)
|
||||
* pci_pasid_features - Check which PASID features are supported
|
||||
* @pdev: PCI device structure
|
||||
*
|
||||
* Returns a negative value when no PASI capability is present.
|
||||
* Otherwise is returns a bitmask with supported features. Current
|
||||
* Return a negative value when no PASID capability is present.
|
||||
* Otherwise return a bitmask with supported features. Current
|
||||
* features reported are:
|
||||
* PCI_PASID_CAP_EXEC - Execute permission supported
|
||||
* PCI_PASID_CAP_PRIV - Privileged mode supported
|
||||
|
@ -196,7 +196,7 @@ config PCIE_MEDIATEK
|
||||
|
||||
config PCIE_MEDIATEK_GEN3
|
||||
tristate "MediaTek Gen3 PCIe controller"
|
||||
depends on ARCH_MEDIATEK || COMPILE_TEST
|
||||
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
|
||||
depends on PCI_MSI
|
||||
help
|
||||
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
|
||||
|
@ -38,7 +38,7 @@ config PCIE_CADENCE_PLAT_EP
|
||||
select PCIE_CADENCE_EP
|
||||
select PCIE_CADENCE_PLAT
|
||||
help
|
||||
Say Y here if you want to support the Cadence PCIe platform controller in
|
||||
Say Y here if you want to support the Cadence PCIe platform controller in
|
||||
endpoint mode. This PCIe controller may be embedded into many
|
||||
different vendors SoCs.
|
||||
|
||||
|
@ -7,6 +7,8 @@
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/container_of.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/io.h>
|
||||
@ -22,6 +24,8 @@
|
||||
#include "../../pci.h"
|
||||
#include "pcie-cadence.h"
|
||||
|
||||
#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
|
||||
|
||||
#define ENABLE_REG_SYS_2 0x108
|
||||
#define STATUS_REG_SYS_2 0x508
|
||||
#define STATUS_CLR_REG_SYS_2 0x708
|
||||
@ -44,6 +48,7 @@ enum link_status {
|
||||
#define J721E_MODE_RC BIT(7)
|
||||
#define LANE_COUNT(n) ((n) << 8)
|
||||
|
||||
#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
|
||||
#define GENERATION_SEL_MASK GENMASK(1, 0)
|
||||
|
||||
struct j721e_pcie {
|
||||
@ -52,6 +57,7 @@ struct j721e_pcie {
|
||||
u32 mode;
|
||||
u32 num_lanes;
|
||||
u32 max_lanes;
|
||||
struct gpio_desc *reset_gpio;
|
||||
void __iomem *user_cfg_base;
|
||||
void __iomem *intd_cfg_base;
|
||||
u32 linkdown_irq_regfield;
|
||||
@ -220,6 +226,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
|
||||
struct regmap *syscon)
|
||||
{
|
||||
struct device *dev = pcie->cdns_pcie->dev;
|
||||
struct device_node *node = dev->of_node;
|
||||
u32 mask = ACSPCIE_PAD_DISABLE_MASK;
|
||||
struct of_phandle_args args;
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
ret = of_parse_phandle_with_fixed_args(node,
|
||||
"ti,syscon-acspcie-proxy-ctrl",
|
||||
1, 0, &args);
|
||||
if (ret) {
|
||||
dev_err(dev,
|
||||
"ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Clear PAD IO disable bits to enable refclk output */
|
||||
val = ~(args.args[0]);
|
||||
ret = regmap_update_bits(syscon, 0, mask, val);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
|
||||
{
|
||||
struct device *dev = pcie->cdns_pcie->dev;
|
||||
@ -259,7 +295,13 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
/* Enable ACSPCIE refclk output if the optional property exists */
|
||||
syscon = syscon_regmap_lookup_by_phandle_optional(node,
|
||||
"ti,syscon-acspcie-proxy-ctrl");
|
||||
if (!syscon)
|
||||
return 0;
|
||||
|
||||
return j721e_enable_acspcie_refclk(pcie, syscon);
|
||||
}
|
||||
|
||||
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
|
||||
@ -482,20 +524,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
|
||||
pm_runtime_enable(dev);
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "pm_runtime_get_sync failed\n");
|
||||
dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
|
||||
goto err_get_sync;
|
||||
}
|
||||
|
||||
ret = j721e_pcie_ctrl_init(pcie);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "pm_runtime_get_sync failed\n");
|
||||
dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
|
||||
goto err_get_sync;
|
||||
}
|
||||
|
||||
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
|
||||
"j721e-pcie-link-down-irq", pcie);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to request link state IRQ %d\n", irq);
|
||||
dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq);
|
||||
goto err_get_sync;
|
||||
}
|
||||
|
||||
@ -505,42 +547,40 @@ static int j721e_pcie_probe(struct platform_device *pdev)
|
||||
case PCI_MODE_RC:
|
||||
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
|
||||
if (IS_ERR(gpiod)) {
|
||||
ret = PTR_ERR(gpiod);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(dev, "Failed to get reset GPIO\n");
|
||||
ret = dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get reset GPIO\n");
|
||||
goto err_get_sync;
|
||||
}
|
||||
pcie->reset_gpio = gpiod;
|
||||
|
||||
ret = cdns_pcie_init_phy(dev, cdns_pcie);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to init phy\n");
|
||||
dev_err_probe(dev, ret, "Failed to init phy\n");
|
||||
goto err_get_sync;
|
||||
}
|
||||
|
||||
clk = devm_clk_get_optional(dev, "pcie_refclk");
|
||||
if (IS_ERR(clk)) {
|
||||
ret = PTR_ERR(clk);
|
||||
dev_err(dev, "failed to get pcie_refclk\n");
|
||||
ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n");
|
||||
goto err_pcie_setup;
|
||||
}
|
||||
|
||||
ret = clk_prepare_enable(clk);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to enable pcie_refclk\n");
|
||||
dev_err_probe(dev, ret, "failed to enable pcie_refclk\n");
|
||||
goto err_pcie_setup;
|
||||
}
|
||||
pcie->refclk = clk;
|
||||
|
||||
/*
|
||||
* "Power Sequencing and Reset Signal Timings" table in
|
||||
* PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
|
||||
* indicates PERST# should be deasserted after minimum of 100us
|
||||
* once REFCLK is stable. The REFCLK to the connector in RC
|
||||
* mode is selected while enabling the PHY. So deassert PERST#
|
||||
* after 100 us.
|
||||
* The "Power Sequencing and Reset Signal Timings" table of the
|
||||
* PCI Express Card Electromechanical Specification, Revision
|
||||
* 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST#
|
||||
* should be deasserted after minimum of 100us once REFCLK is
|
||||
* stable. The REFCLK to the connector in RC mode is selected
|
||||
* while enabling the PHY. So deassert PERST# after 100 us.
|
||||
*/
|
||||
if (gpiod) {
|
||||
usleep_range(100, 200);
|
||||
fsleep(PCIE_T_PERST_CLK_US);
|
||||
gpiod_set_value_cansleep(gpiod, 1);
|
||||
}
|
||||
|
||||
@ -554,7 +594,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
|
||||
case PCI_MODE_EP:
|
||||
ret = cdns_pcie_init_phy(dev, cdns_pcie);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to init phy\n");
|
||||
dev_err_probe(dev, ret, "Failed to init phy\n");
|
||||
goto err_get_sync;
|
||||
}
|
||||
|
||||
@@ -589,6 +629,87 @@ static void j721e_pcie_remove(struct platform_device *pdev)
	pm_runtime_disable(dev);
}

static int j721e_pcie_suspend_noirq(struct device *dev)
{
	struct j721e_pcie *pcie = dev_get_drvdata(dev);

	if (pcie->mode == PCI_MODE_RC) {
		gpiod_set_value_cansleep(pcie->reset_gpio, 0);
		clk_disable_unprepare(pcie->refclk);
	}

	cdns_pcie_disable_phy(pcie->cdns_pcie);

	return 0;
}

static int j721e_pcie_resume_noirq(struct device *dev)
{
	struct j721e_pcie *pcie = dev_get_drvdata(dev);
	struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
	int ret;

	ret = j721e_pcie_ctrl_init(pcie);
	if (ret < 0)
		return ret;

	j721e_pcie_config_link_irq(pcie);

	/*
	 * This is not called explicitly in the probe, it is called by
	 * cdns_pcie_init_phy().
	 */
	ret = cdns_pcie_enable_phy(pcie->cdns_pcie);
	if (ret < 0)
		return ret;

	if (pcie->mode == PCI_MODE_RC) {
		struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie);

		ret = clk_prepare_enable(pcie->refclk);
		if (ret < 0)
			return ret;

		/*
		 * The "Power Sequencing and Reset Signal Timings" table of the
		 * PCI Express Card Electromechanical Specification, Revision
		 * 5.1, Section 2.9.2, Symbol "T_PERST-CLK", indicates PERST#
		 * should be deasserted after minimum of 100us once REFCLK is
		 * stable. The REFCLK to the connector in RC mode is selected
		 * while enabling the PHY. So deassert PERST# after 100 us.
		 */
		if (pcie->reset_gpio) {
			fsleep(PCIE_T_PERST_CLK_US);
			gpiod_set_value_cansleep(pcie->reset_gpio, 1);
		}

		ret = cdns_pcie_host_link_setup(rc);
		if (ret < 0) {
			clk_disable_unprepare(pcie->refclk);
			return ret;
		}

		/*
		 * Reset internal status of BARs to force reinitialization in
		 * cdns_pcie_host_init().
		 */
		for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
			rc->avail_ib_bar[bar] = true;

		ret = cdns_pcie_host_init(rc);
		if (ret) {
			clk_disable_unprepare(pcie->refclk);
			return ret;
		}
	}

	return 0;
}

static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops,
			       j721e_pcie_suspend_noirq,
			       j721e_pcie_resume_noirq);

static struct platform_driver j721e_pcie_driver = {
	.probe = j721e_pcie_probe,
	.remove_new = j721e_pcie_remove,
@@ -596,6 +717,7 @@ static struct platform_driver j721e_pcie_driver = {
		.name = "j721e-pcie",
		.of_match_table = of_j721e_pcie_match,
		.suppress_bind_attrs = true,
		.pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
	},
};
builtin_platform_driver(j721e_pcie_driver);
|
||||
|
@@ -485,8 +485,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
	return cdns_pcie_host_map_dma_ranges(rc);
}

static int cdns_pcie_host_init(struct device *dev,
			       struct cdns_pcie_rc *rc)
int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
	int err;

@@ -497,6 +496,30 @@ static int cdns_pcie_host_init(struct device *dev,
	return cdns_pcie_host_init_address_translation(rc);
}

int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = rc->pcie.dev;
	int ret;

	if (rc->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);

	cdns_pcie_host_enable_ptm_response(pcie);

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	ret = cdns_pcie_host_start_link(rc);
	if (ret)
		dev_dbg(dev, "PCIe link never came up\n");

	return 0;
}
|
||||
|
||||
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
|
||||
{
|
||||
struct device *dev = rc->pcie.dev;
|
||||
@ -533,25 +556,14 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
|
||||
return PTR_ERR(rc->cfg_base);
|
||||
rc->cfg_res = res;
|
||||
|
||||
if (rc->quirk_detect_quiet_flag)
|
||||
cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
|
||||
|
||||
cdns_pcie_host_enable_ptm_response(pcie);
|
||||
|
||||
ret = cdns_pcie_start_link(pcie);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to start link\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = cdns_pcie_host_start_link(rc);
|
||||
ret = cdns_pcie_host_link_setup(rc);
|
||||
if (ret)
|
||||
dev_dbg(dev, "PCIe link never came up\n");
|
||||
return ret;
|
||||
|
||||
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
|
||||
rc->avail_ib_bar[bar] = true;
|
||||
|
||||
ret = cdns_pcie_host_init(dev, rc);
|
||||
ret = cdns_pcie_host_init(rc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -314,7 +314,6 @@ struct cdns_pcie {
|
||||
/**
|
||||
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
|
||||
* @pcie: Cadence PCIe controller
|
||||
* @dev: pointer to PCIe device
|
||||
* @cfg_res: start/end offsets in the physical system memory to map PCI
|
||||
* configuration space accesses
|
||||
* @cfg_base: IO mapped window to access the PCI configuration space of a
|
||||
@ -521,10 +520,22 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PCIE_CADENCE_HOST
|
||||
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
|
||||
int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
|
||||
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
|
||||
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
|
||||
int where);
|
||||
#else
|
||||
static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
|
||||
{
|
||||
return 0;
|
||||
|
@@ -265,12 +265,16 @@ config PCIE_DW_PLAT_EP
	  order to enable device-specific features PCI_DW_PLAT_EP must be
	  selected.

config PCIE_QCOM_COMMON
	bool

config PCIE_QCOM
	bool "Qualcomm PCIe controller (host mode)"
	depends on OF && (ARCH_QCOM || COMPILE_TEST)
	depends on PCI_MSI
	select PCIE_DW_HOST
	select CRC8
	select PCIE_QCOM_COMMON
	help
	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The
	  PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -281,6 +285,7 @@ config PCIE_QCOM_EP
	depends on OF && (ARCH_QCOM || COMPILE_TEST)
	depends on PCI_ENDPOINT
	select PCIE_DW_EP
	select PCIE_QCOM_COMMON
	help
	  Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
	  to work in endpoint mode. The PCIe controller uses the DesignWare core

@@ -12,6 +12,7 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o

@@ -850,14 +850,21 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
	dra7xx->mode = mode;

	ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
			       IRQF_SHARED | IRQF_ONESHOT,
			       "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
		goto err_deinit;
	}

	return 0;

err_deinit:
	if (dra7xx->mode == DW_PCIE_RC_TYPE)
		dw_pcie_host_deinit(&dra7xx->pci->pp);
	else
		dw_pcie_ep_deinit(&dra7xx->pci->ep);

err_gpio:
err_get_sync:
	pm_runtime_put(dev);

File diff suppressed because it is too large
@ -189,12 +189,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
(int)data->hwirq, msg->address_hi, msg->address_lo);
|
||||
}
|
||||
|
||||
static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void ks_pcie_msi_mask(struct irq_data *data)
|
||||
{
|
||||
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
|
||||
@ -247,7 +241,6 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
|
||||
.name = "KEYSTONE-PCI-MSI",
|
||||
.irq_ack = ks_pcie_msi_irq_ack,
|
||||
.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
|
||||
.irq_set_affinity = ks_pcie_msi_set_affinity,
|
||||
.irq_mask = ks_pcie_msi_mask,
|
||||
.irq_unmask = ks_pcie_msi_unmask,
|
||||
};
|
||||
@ -577,7 +570,7 @@ static void ks_pcie_quirk(struct pci_dev *dev)
|
||||
*/
|
||||
if (pci_match_id(am6_pci_devids, bridge)) {
|
||||
bridge_dev = pci_get_host_bridge_device(dev);
|
||||
if (!bridge_dev && !bridge_dev->parent)
|
||||
if (!bridge_dev || !bridge_dev->parent)
|
||||
return;
|
||||
|
||||
ks_pcie = dev_get_drvdata(bridge_dev->parent);
|
||||
|
@ -48,8 +48,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info dw_pcie_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
|
||||
MSI_FLAG_MULTI_PCI_MSI,
|
||||
.chip = &dw_pcie_msi_irq_chip,
|
||||
};
|
||||
|
||||
@ -116,12 +117,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
|
||||
(int)d->hwirq, msg->address_hi, msg->address_lo);
|
||||
}
|
||||
|
||||
static int dw_pci_msi_set_affinity(struct irq_data *d,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void dw_pci_bottom_mask(struct irq_data *d)
|
||||
{
|
||||
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
|
||||
@ -177,7 +172,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = {
|
||||
.name = "DWPCI-MSI",
|
||||
.irq_ack = dw_pci_bottom_ack,
|
||||
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
|
||||
.irq_set_affinity = dw_pci_msi_set_affinity,
|
||||
.irq_mask = dw_pci_bottom_mask,
|
||||
.irq_unmask = dw_pci_bottom_unmask,
|
||||
};
|
||||
|
@ -112,6 +112,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
|
||||
pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
|
||||
if (IS_ERR(pci->dbi_base))
|
||||
return PTR_ERR(pci->dbi_base);
|
||||
pci->dbi_phys_addr = res->start;
|
||||
}
|
||||
|
||||
/* DBI2 is mainly useful for the endpoint controller */
|
||||
@ -134,6 +135,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
|
||||
pci->atu_base = devm_ioremap_resource(pci->dev, res);
|
||||
if (IS_ERR(pci->atu_base))
|
||||
return PTR_ERR(pci->atu_base);
|
||||
pci->atu_phys_addr = res->start;
|
||||
} else {
|
||||
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
|
||||
}
|
||||
@ -166,8 +168,8 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (pci->link_gen < 1)
|
||||
pci->link_gen = of_pci_get_max_link_speed(np);
|
||||
if (pci->max_link_speed < 1)
|
||||
pci->max_link_speed = of_pci_get_max_link_speed(np);
|
||||
|
||||
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
|
||||
|
||||
@ -687,16 +689,27 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
|
||||
|
||||
static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
|
||||
static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
|
||||
{
|
||||
u32 cap, ctrl2, link_speed;
|
||||
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
|
||||
|
||||
cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
|
||||
|
||||
/*
|
||||
* Even if the platform doesn't want to limit the maximum link speed,
|
||||
* just cache the hardware default value so that the vendor drivers can
|
||||
* use it to do any link specific configuration.
|
||||
*/
|
||||
if (pci->max_link_speed < 1) {
|
||||
pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
|
||||
return;
|
||||
}
|
||||
|
||||
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
|
||||
ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
|
||||
|
||||
switch (pcie_link_speed[link_gen]) {
|
||||
switch (pcie_link_speed[pci->max_link_speed]) {
|
||||
case PCIE_SPEED_2_5GT:
|
||||
link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
|
||||
break;
|
||||
@ -1058,8 +1071,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
if (pci->link_gen > 0)
|
||||
dw_pcie_link_set_max_speed(pci, pci->link_gen);
|
||||
dw_pcie_link_set_max_speed(pci);
|
||||
|
||||
/* Configure Gen1 N_FTS */
|
||||
if (pci->n_fts[0]) {
|
||||
|
@ -125,6 +125,19 @@
|
||||
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
|
||||
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
|
||||
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
|
||||
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1
|
||||
|
||||
#define GEN3_EQ_CONTROL_OFF 0x8A8
|
||||
#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
|
||||
#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
|
||||
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
|
||||
#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
|
||||
|
||||
#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
|
||||
#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
|
||||
#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
|
||||
#define GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA GENMASK(13, 10)
|
||||
#define GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA GENMASK(17, 14)
|
||||
|
||||
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
|
||||
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
|
||||
@ -197,6 +210,24 @@
|
||||
|
||||
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
|
||||
|
||||
/*
|
||||
* 16.0 GT/s (Gen 4) lane margining register definitions
|
||||
*/
|
||||
#define GEN4_LANE_MARGINING_1_OFF 0xB80
|
||||
#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
|
||||
#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
|
||||
#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
|
||||
#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
|
||||
|
||||
#define GEN4_LANE_MARGINING_2_OFF 0xB84
|
||||
#define MARGINING_IND_ERROR_SAMPLER BIT(28)
|
||||
#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
|
||||
#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
|
||||
#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
|
||||
#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
|
||||
#define MARGINING_MAXLANES GENMASK(20, 16)
|
||||
#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
|
||||
#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
|
||||
/*
|
||||
* iATU Unroll-specific register definitions
|
||||
* From 4.80 core version the address translation will be made by unroll
|
||||
@ -407,8 +438,10 @@ struct dw_pcie_ops {
|
||||
struct dw_pcie {
|
||||
struct device *dev;
|
||||
void __iomem *dbi_base;
|
||||
resource_size_t dbi_phys_addr;
|
||||
void __iomem *dbi_base2;
|
||||
void __iomem *atu_base;
|
||||
resource_size_t atu_phys_addr;
|
||||
size_t atu_size;
|
||||
u32 num_ib_windows;
|
||||
u32 num_ob_windows;
|
||||
@ -421,7 +454,7 @@ struct dw_pcie {
|
||||
u32 type;
|
||||
unsigned long caps;
|
||||
int num_lanes;
|
||||
int link_gen;
|
||||
int max_link_speed;
|
||||
u8 n_fts[2];
|
||||
struct dw_edma_chip edma;
|
||||
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
|
||||
|
@ -132,7 +132,7 @@ static void intel_pcie_link_setup(struct intel_pcie *pcie)
|
||||
|
||||
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
|
||||
{
|
||||
switch (pci->link_gen) {
|
||||
switch (pci->max_link_speed) {
|
||||
case 3:
|
||||
pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
|
||||
break;
|
||||
@ -252,7 +252,7 @@ static int intel_pcie_wait_l2(struct intel_pcie *pcie)
|
||||
int ret;
|
||||
struct dw_pcie *pci = &pcie->pci;
|
||||
|
||||
if (pci->link_gen < 3)
|
||||
if (pci->max_link_speed < 3)
|
||||
return 0;
|
||||
|
||||
/* Send PME_TURN_OFF message */
|
||||
|
@ -420,11 +420,11 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
|
||||
"unable to get a valid reset gpio\n");
|
||||
}
|
||||
|
||||
pcie->num_slots++;
|
||||
if (pcie->num_slots > MAX_PCI_SLOTS) {
|
||||
if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) {
|
||||
dev_err(dev, "Too many PCI slots!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
pcie->num_slots++;
|
||||
|
||||
ret = of_pci_get_devfn(child);
|
||||
if (ret < 0) {
|
||||
|
78
drivers/pci/controller/dwc/pcie-qcom-common.c
Normal file
@ -0,0 +1,78 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include "pcie-designware.h"
|
||||
#include "pcie-qcom-common.h"
|
||||
|
||||
void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
/*
|
||||
* GEN3_RELATED_OFF register is repurposed to apply equalization
|
||||
* settings at various data transmission rates through registers namely
|
||||
* GEN3_EQ_*. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF
|
||||
* determines the data rate for which these equalization settings are
|
||||
* applied.
|
||||
*/
|
||||
reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
|
||||
reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
|
||||
reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
|
||||
reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
|
||||
GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT);
|
||||
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
|
||||
|
||||
reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
|
||||
reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
|
||||
GEN3_EQ_FMDC_N_EVALS |
|
||||
GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
|
||||
GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
|
||||
reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
|
||||
FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
|
||||
FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
|
||||
FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
|
||||
dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
|
||||
|
||||
reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
|
||||
reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
|
||||
GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
|
||||
GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
|
||||
GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
|
||||
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization);
|
||||
|
||||
void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF);
|
||||
reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET |
|
||||
MARGINING_NUM_VOLTAGE_STEPS |
|
||||
MARGINING_MAX_TIMING_OFFSET |
|
||||
MARGINING_NUM_TIMING_STEPS);
|
||||
reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) |
|
||||
FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) |
|
||||
FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) |
|
||||
FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10);
|
||||
dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg);
|
||||
|
||||
reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF);
|
||||
reg |= MARGINING_IND_ERROR_SAMPLER |
|
||||
MARGINING_SAMPLE_REPORTING_METHOD |
|
||||
MARGINING_IND_LEFT_RIGHT_TIMING |
|
||||
MARGINING_VOLTAGE_SUPPORTED;
|
||||
reg &= ~(MARGINING_IND_UP_DOWN_VOLTAGE |
|
||||
MARGINING_MAXLANES |
|
||||
MARGINING_SAMPLE_RATE_TIMING |
|
||||
MARGINING_SAMPLE_RATE_VOLTAGE);
|
||||
reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) |
|
||||
FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) |
|
||||
FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f);
|
||||
dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining);
|
14
drivers/pci/controller/dwc/pcie-qcom-common.h
Normal file
@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _PCIE_QCOM_COMMON_H
#define _PCIE_QCOM_COMMON_H

struct dw_pcie;

void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci);
void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);

#endif

@ -25,6 +25,7 @@
|
||||
|
||||
#include "../../pci.h"
|
||||
#include "pcie-designware.h"
|
||||
#include "pcie-qcom-common.h"
|
||||
|
||||
/* PARF registers */
|
||||
#define PARF_SYS_CTRL 0x00
|
||||
@ -498,6 +499,11 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
|
||||
goto err_disable_resources;
|
||||
}
|
||||
|
||||
if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
|
||||
qcom_pcie_common_set_16gt_equalization(pci);
|
||||
qcom_pcie_common_set_16gt_lane_margining(pci);
|
||||
}
|
||||
|
||||
/*
|
||||
* The physical address of the MMIO region which is exposed as the BAR
|
||||
* should be written to MHI BASE registers.
|
||||
@ -659,11 +665,9 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
|
||||
struct dw_pcie *pci = &pcie_ep->pci;
|
||||
struct device *dev = pci->dev;
|
||||
u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
|
||||
u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
|
||||
u32 dstate, val;
|
||||
|
||||
writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
|
||||
status &= mask;
|
||||
|
||||
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
|
||||
dev_dbg(dev, "Received Linkdown event\n");
|
||||
@ -693,7 +697,8 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
|
||||
dw_pcie_ep_linkup(&pci->ep);
|
||||
pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
|
||||
} else {
|
||||
dev_err(dev, "Received unknown event: %d\n", status);
|
||||
dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
|
||||
status);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@ -724,8 +729,15 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
|
||||
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
|
||||
struct qcom_pcie_ep *pcie_ep)
|
||||
{
|
||||
struct device *dev = pcie_ep->pci.dev;
|
||||
char *name;
|
||||
int ret;
|
||||
|
||||
name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d",
|
||||
pcie_ep->pci.ep.epc->domain_nr);
|
||||
if (!name)
|
||||
return -ENOMEM;
|
||||
|
||||
pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
|
||||
if (pcie_ep->global_irq < 0)
|
||||
return pcie_ep->global_irq;
|
||||
@ -733,18 +745,23 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
|
||||
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
|
||||
qcom_pcie_ep_global_irq_thread,
|
||||
IRQF_ONESHOT,
|
||||
"global_irq", pcie_ep);
|
||||
name, pcie_ep);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Failed to request Global IRQ\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d",
|
||||
pcie_ep->pci.ep.epc->domain_nr);
|
||||
if (!name)
|
||||
return -ENOMEM;
|
||||
|
||||
pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
|
||||
irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
|
||||
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
|
||||
qcom_pcie_ep_perst_irq_thread,
|
||||
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
|
||||
"perst_irq", pcie_ep);
|
||||
name, pcie_ep);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
|
||||
disable_irq(pcie_ep->global_irq);
|
||||
@ -858,21 +875,15 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = qcom_pcie_enable_resources(pcie_ep);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to enable resources: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
|
||||
goto err_disable_resources;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
|
||||
if (ret)
|
||||
goto err_disable_resources;
|
||||
goto err_ep_deinit;
|
||||
|
||||
name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
|
||||
if (!name) {
|
||||
@ -889,8 +900,8 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
|
||||
disable_irq(pcie_ep->global_irq);
|
||||
disable_irq(pcie_ep->perst_irq);
|
||||
|
||||
err_disable_resources:
|
||||
qcom_pcie_disable_resources(pcie_ep);
|
||||
err_ep_deinit:
|
||||
dw_pcie_ep_deinit(&pcie_ep->pci.ep);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -35,6 +35,7 @@
|
||||
|
||||
#include "../../pci.h"
|
||||
#include "pcie-designware.h"
|
||||
#include "pcie-qcom-common.h"
|
||||
|
||||
/* PARF registers */
|
||||
#define PARF_SYS_CTRL 0x00
|
||||
@ -45,15 +46,24 @@
|
||||
#define PARF_PHY_REFCLK 0x4c
|
||||
#define PARF_CONFIG_BITS 0x50
|
||||
#define PARF_DBI_BASE_ADDR 0x168
|
||||
#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
|
||||
#define PARF_MHI_CLOCK_RESET_CTRL 0x174
|
||||
#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
|
||||
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
|
||||
#define PARF_Q2A_FLUSH 0x1ac
|
||||
#define PARF_LTSSM 0x1b0
|
||||
#define PARF_INT_ALL_STATUS 0x224
|
||||
#define PARF_INT_ALL_CLEAR 0x228
|
||||
#define PARF_INT_ALL_MASK 0x22c
|
||||
#define PARF_SID_OFFSET 0x234
|
||||
#define PARF_BDF_TRANSLATE_CFG 0x24c
|
||||
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
|
||||
#define PARF_DBI_BASE_ADDR_V2 0x350
|
||||
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
|
||||
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
|
||||
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
|
||||
#define PARF_NO_SNOOP_OVERIDE 0x3d4
|
||||
#define PARF_ATU_BASE_ADDR 0x634
|
||||
#define PARF_ATU_BASE_ADDR_HI 0x638
|
||||
#define PARF_DEVICE_TYPE 0x1000
|
||||
#define PARF_BDF_TO_SID_TABLE_N 0x2000
|
||||
#define PARF_BDF_TO_SID_CFG 0x2c00
|
||||
@ -108,7 +118,7 @@
|
||||
#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
|
||||
|
||||
/* PARF_SLV_ADDR_SPACE_SIZE register value */
|
||||
#define SLV_ADDR_SPACE_SZ 0x10000000
|
||||
#define SLV_ADDR_SPACE_SZ 0x80000000
|
||||
|
||||
/* PARF_MHI_CLOCK_RESET_CTRL register fields */
|
||||
#define AHB_CLK_EN BIT(0)
|
||||
@ -121,6 +131,9 @@
|
||||
/* PARF_LTSSM register fields */
|
||||
#define LTSSM_EN BIT(8)
|
||||
|
||||
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
|
||||
#define PARF_INT_ALL_LINK_UP BIT(13)
|
||||
|
||||
/* PARF_NO_SNOOP_OVERIDE register fields */
|
||||
#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
|
||||
#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
|
||||
@ -284,6 +297,11 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
|
||||
{
|
||||
struct qcom_pcie *pcie = to_qcom_pcie(pci);
|
||||
|
||||
if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
|
||||
qcom_pcie_common_set_16gt_equalization(pci);
|
||||
qcom_pcie_common_set_16gt_lane_margining(pci);
|
||||
}
|
||||
|
||||
/* Enable Link Training state machine */
|
||||
if (pcie->cfg->ops->ltssm_enable)
|
||||
pcie->cfg->ops->ltssm_enable(pcie);
|
||||
@ -325,6 +343,50 @@ static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
|
||||
dw_pcie_dbi_ro_wr_dis(pci);
|
||||
}
|
||||
|
||||
static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
|
||||
{
|
||||
struct dw_pcie *pci = pcie->pci;
|
||||
|
||||
if (pci->dbi_phys_addr) {
|
||||
/*
|
||||
* PARF_DBI_BASE_ADDR register is in CPU domain and require to
|
||||
* be programmed with CPU physical address.
|
||||
*/
|
||||
writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
|
||||
PARF_DBI_BASE_ADDR);
|
||||
writel(SLV_ADDR_SPACE_SZ, pcie->parf +
|
||||
PARF_SLV_ADDR_SPACE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
|
||||
{
|
||||
struct dw_pcie *pci = pcie->pci;
|
||||
|
||||
if (pci->dbi_phys_addr) {
|
||||
/*
|
||||
* PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
|
||||
* in CPU domain and require to be programmed with CPU
|
||||
* physical addresses.
|
||||
*/
|
||||
writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
|
||||
PARF_DBI_BASE_ADDR_V2);
|
||||
writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
|
||||
PARF_DBI_BASE_ADDR_V2_HI);
|
||||
|
||||
if (pci->atu_phys_addr) {
|
||||
writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
|
||||
PARF_ATU_BASE_ADDR);
|
||||
writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
|
||||
PARF_ATU_BASE_ADDR_HI);
|
||||
}
|
||||
|
||||
writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
|
||||
writel(SLV_ADDR_SPACE_SZ, pcie->parf +
|
||||
PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
|
||||
}
|
||||
}
|
||||
|
||||
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
|
||||
{
|
||||
u32 val;
|
||||
@ -541,8 +603,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
|
||||
|
||||
static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
|
||||
{
|
||||
/* change DBI base address */
|
||||
writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
|
||||
qcom_pcie_configure_dbi_base(pcie);
|
||||
|
||||
if (IS_ENABLED(CONFIG_PCI_MSI)) {
|
||||
u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
|
||||
@ -629,8 +690,7 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
|
||||
val &= ~PHY_TEST_PWR_DOWN;
|
||||
writel(val, pcie->parf + PARF_PHY_CTRL);
|
||||
|
||||
/* change DBI base address */
|
||||
writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
|
||||
qcom_pcie_configure_dbi_base(pcie);
|
||||
|
||||
/* MAC PHY_POWERDOWN MUX DISABLE */
|
||||
val = readl(pcie->parf + PARF_SYS_CTRL);
|
||||
@ -812,13 +872,11 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
|
||||
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
|
||||
u32 val;
|
||||
|
||||
writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
|
||||
|
||||
val = readl(pcie->parf + PARF_PHY_CTRL);
|
||||
val &= ~PHY_TEST_PWR_DOWN;
|
||||
writel(val, pcie->parf + PARF_PHY_CTRL);
|
||||
|
||||
writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
|
||||
qcom_pcie_configure_dbi_atu_base(pcie);
|
||||
|
||||
writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
|
||||
| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
|
||||
@ -914,8 +972,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
|
||||
val &= ~PHY_TEST_PWR_DOWN;
|
||||
writel(val, pcie->parf + PARF_PHY_CTRL);
|
||||
|
||||
/* change DBI base address */
|
||||
writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
|
||||
qcom_pcie_configure_dbi_atu_base(pcie);
|
||||
|
||||
/* MAC PHY_POWERDOWN MUX DISABLE */
|
||||
val = readl(pcie->parf + PARF_SYS_CTRL);
|
||||
@ -1124,14 +1181,11 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
|
||||
u32 val;
|
||||
int i;
|
||||
|
||||
writel(SLV_ADDR_SPACE_SZ,
|
||||
pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
|
||||
|
||||
val = readl(pcie->parf + PARF_PHY_CTRL);
|
||||
val &= ~PHY_TEST_PWR_DOWN;
|
||||
writel(val, pcie->parf + PARF_PHY_CTRL);
|
||||
|
||||
writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
|
||||
qcom_pcie_configure_dbi_atu_base(pcie);
|
||||
|
||||
writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
|
||||
writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
|
||||
@ -1489,6 +1543,29 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
|
||||
qcom_pcie_link_transition_count);
|
||||
}
|
||||
|
||||
static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
|
||||
{
|
||||
struct qcom_pcie *pcie = data;
|
||||
struct dw_pcie_rp *pp = &pcie->pci->pp;
|
||||
struct device *dev = pcie->pci->dev;
|
||||
u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
|
||||
|
||||
writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
|
||||
|
||||
if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
|
||||
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
|
||||
/* Rescan the bus to enumerate endpoint devices */
|
||||
pci_lock_rescan_remove();
|
||||
pci_rescan_bus(pp->bridge->bus);
|
||||
pci_unlock_rescan_remove();
|
||||
} else {
|
||||
dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
|
||||
status);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int qcom_pcie_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct qcom_pcie_cfg *pcie_cfg;
|
||||
@ -1499,7 +1576,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
|
||||
struct dw_pcie_rp *pp;
|
||||
struct resource *res;
|
||||
struct dw_pcie *pci;
|
||||
int ret;
|
||||
int ret, irq;
|
||||
char *name;
|
||||
|
||||
pcie_cfg = of_device_get_match_data(dev);
|
||||
if (!pcie_cfg || !pcie_cfg->ops) {
|
||||
@ -1620,6 +1698,27 @@ static int qcom_pcie_probe(struct platform_device *pdev)
|
||||
goto err_phy_exit;
|
||||
}
|
||||
|
||||
name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
|
||||
pci_domain_nr(pp->bridge->bus));
|
||||
if (!name) {
|
||||
ret = -ENOMEM;
|
||||
goto err_host_deinit;
|
||||
}
|
||||
|
||||
irq = platform_get_irq_byname_optional(pdev, "global");
|
||||
if (irq > 0) {
|
||||
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
|
||||
qcom_pcie_global_irq_thread,
|
||||
IRQF_ONESHOT, name, pcie);
|
||||
if (ret) {
|
||||
dev_err_probe(&pdev->dev, ret,
|
||||
"Failed to request Global IRQ\n");
|
||||
goto err_host_deinit;
|
||||
}
|
||||
|
||||
writel_relaxed(PARF_INT_ALL_LINK_UP, pcie->parf + PARF_INT_ALL_MASK);
|
||||
}
|
||||
|
||||
qcom_pcie_icc_opp_update(pcie);
|
||||
|
||||
if (pcie->mhi)
|
||||
@ -1627,6 +1726,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
|
||||
|
||||
return 0;
|
||||
|
||||
err_host_deinit:
|
||||
dw_pcie_host_deinit(pp);
|
||||
err_phy_exit:
|
||||
phy_exit(pcie->phy);
|
||||
err_pm_runtime_put:
|
||||
|
@ -141,10 +141,10 @@ static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
|
||||
}
|
||||
|
||||
/*
|
||||
* Require direct speed change with retrying here if the link_gen is
|
||||
* PCIe Gen2 or higher.
|
||||
* Require direct speed change with retrying here if the max_link_speed
|
||||
* is PCIe Gen2 or higher.
|
||||
*/
|
||||
changes = min_not_zero(dw->link_gen, RCAR_MAX_LINK_SPEED) - 1;
|
||||
changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1;
|
||||
|
||||
/*
|
||||
* Since dw_pcie_setup_rc() sets it once, PCIe Gen2 will be trained.
|
||||
@ -606,7 +606,12 @@ static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
|
||||
static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
|
||||
{
|
||||
/* The check_addr values are magical numbers in the datasheet */
|
||||
const u32 check_addr[] = { 0x00101018, 0x00101118, 0x00101021, 0x00101121};
|
||||
static const u32 check_addr[] = {
|
||||
0x00101018,
|
||||
0x00101118,
|
||||
0x00101021,
|
||||
0x00101121,
|
||||
};
|
||||
struct dw_pcie *dw = &rcar->dw;
|
||||
const struct firmware *fw;
|
||||
unsigned int i, timeout;
|
||||
|
@@ -233,7 +233,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
	}

	if (of_property_read_bool(np, "st,pcie-is-gen1"))
		pci->link_gen = 1;
		pci->max_link_speed = 1;

	platform_set_drvdata(pdev, spear13xx_pcie);

|
@ -177,17 +177,12 @@
|
||||
#define N_FTS_VAL 52
|
||||
#define FTS_VAL 52
|
||||
|
||||
#define GEN3_EQ_CONTROL_OFF 0x8a8
|
||||
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
|
||||
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
|
||||
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
|
||||
|
||||
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
|
||||
#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
|
||||
#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
|
||||
#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
|
||||
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
|
||||
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
|
||||
#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
|
||||
#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
|
||||
#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
|
||||
#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
|
||||
#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
|
||||
|
||||
#define MSIX_ADDR_MATCH_LOW_OFF 0x940
|
||||
#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
|
||||
@ -861,9 +856,9 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
|
||||
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
|
||||
|
||||
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
|
||||
val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
|
||||
val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
|
||||
val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
|
||||
val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
|
||||
val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
|
||||
val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
|
||||
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
|
||||
|
||||
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
|
||||
@ -872,10 +867,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
|
||||
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
|
||||
|
||||
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
|
||||
val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
|
||||
val |= (pcie->of_data->gen4_preset_vec <<
|
||||
GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
|
||||
val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
|
||||
val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
|
||||
val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
|
||||
pcie->of_data->gen4_preset_vec);
|
||||
val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
|
||||
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
|
||||
|
||||
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
|
||||
@ -907,11 +902,11 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
|
||||
|
||||
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
|
||||
|
||||
/* Enable as 0xFFFF0001 response for CRS */
|
||||
/* Enable as 0xFFFF0001 response for RRS */
|
||||
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
|
||||
val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
|
||||
val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
|
||||
AMBA_ERROR_RESPONSE_CRS_SHIFT);
|
||||
val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
|
||||
val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
|
||||
AMBA_ERROR_RESPONSE_RRS_SHIFT);
|
||||
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
|
||||
|
||||
/* Clear Slot Clock Configuration bit if SRNS configuration */
|
||||
|
@ -360,8 +360,8 @@ static struct irq_chip mobiveil_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info mobiveil_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
|
||||
.chip = &mobiveil_msi_irq_chip,
|
||||
};
|
||||
|
||||
@ -378,16 +378,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
(int)data->hwirq, msg->address_hi, msg->address_lo);
|
||||
}
|
||||
|
||||
static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
|
||||
.name = "Mobiveil MSI",
|
||||
.irq_compose_msi_msg = mobiveil_compose_msi_msg,
|
||||
.irq_set_affinity = mobiveil_msi_set_affinity,
|
||||
};
|
||||
|
||||
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
|
||||
|
@ -50,7 +50,7 @@
|
||||
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
|
||||
#define PIO_COMPLETION_STATUS_OK 0
|
||||
#define PIO_COMPLETION_STATUS_UR 1
|
||||
#define PIO_COMPLETION_STATUS_CRS 2
|
||||
#define PIO_COMPLETION_STATUS_RRS 2
|
||||
#define PIO_COMPLETION_STATUS_CA 4
|
||||
#define PIO_NON_POSTED_REQ BIT(10)
|
||||
#define PIO_ERR_STATUS BIT(11)
|
||||
@ -262,7 +262,7 @@ enum {
|
||||
|
||||
#define MSI_IRQ_NUM 32
|
||||
|
||||
#define CFG_RD_CRS_VAL 0xffff0001
|
||||
#define CFG_RD_RRS_VAL 0xffff0001
|
||||
|
||||
struct advk_pcie {
|
||||
struct platform_device *pdev;
|
||||
@ -649,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
|
||||
advk_pcie_train_link(pcie);
|
||||
}
|
||||
|
||||
static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
|
||||
static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
|
||||
{
|
||||
struct device *dev = &pcie->pdev->dev;
|
||||
u32 reg;
|
||||
@ -669,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
||||
* 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
|
||||
* means a PIO write error, and for PIO read it is successful with
|
||||
* a read value of 0xFFFFFFFF.
|
||||
* 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
|
||||
* 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
|
||||
* only means a PIO write error, and for PIO read it is successful
|
||||
* with a read value of 0xFFFF0001.
|
||||
* 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
|
||||
@ -694,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
||||
strcomp_status = "UR";
|
||||
ret = -EOPNOTSUPP;
|
||||
break;
|
||||
case PIO_COMPLETION_STATUS_CRS:
|
||||
if (allow_crs && val) {
|
||||
/* PCIe r4.0, sec 2.3.2, says:
|
||||
* If CRS Software Visibility is enabled:
|
||||
case PIO_COMPLETION_STATUS_RRS:
|
||||
if (allow_rrs && val) {
|
||||
/* PCIe r6.0, sec 2.3.2, says:
|
||||
* If Configuration RRS Software Visibility is enabled:
|
||||
* For a Configuration Read Request that includes both
|
||||
* bytes of the Vendor ID field of a device Function's
|
||||
* Configuration Space Header, the Root Complex must
|
||||
@ -706,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
||||
* all '1's for any additional bytes included in the
|
||||
* request.
|
||||
*
|
||||
* So CRS in this case is not an error status.
|
||||
* So RRS in this case is not an error status.
|
||||
*/
|
||||
*val = CFG_RD_CRS_VAL;
|
||||
*val = CFG_RD_RRS_VAL;
|
||||
strcomp_status = NULL;
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
/* PCIe r4.0, sec 2.3.2, says:
|
||||
* If CRS Software Visibility is not enabled, the Root Complex
|
||||
/* PCIe r6.0, sec 2.3.2, says:
|
||||
* If RRS Software Visibility is not enabled, the Root Complex
|
||||
* must re-issue the Configuration Request as a new Request.
|
||||
* If CRS Software Visibility is enabled: For a Configuration
|
||||
* If RRS Software Visibility is enabled: For a Configuration
|
||||
* Write Request or for any other Configuration Read Request,
|
||||
* the Root Complex must re-issue the Configuration Request as
|
||||
* a new Request.
|
||||
* A Root Complex implementation may choose to limit the number
|
||||
* of Configuration Request/CRS Completion Status loops before
|
||||
* of Configuration Request/RRS Completion Status loops before
|
||||
* determining that something is wrong with the target of the
|
||||
* Request and taking appropriate action, e.g., complete the
|
||||
* Request to the host as a failed transaction.
|
||||
@ -729,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
|
||||
* So return -EAGAIN and caller (pci-aardvark.c driver) will
|
||||
* re-issue request again up to the PIO_RETRY_CNT retries.
|
||||
*/
|
||||
strcomp_status = "CRS";
|
||||
strcomp_status = "RRS";
|
||||
ret = -EAGAIN;
|
||||
break;
|
||||
case PIO_COMPLETION_STATUS_CA:
|
||||
@ -920,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
|
||||
|
||||
case PCI_EXP_RTCTL: {
|
||||
u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
|
||||
/* Only emulation of PMEIE and CRSSVE bits is provided */
|
||||
rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
|
||||
/* Only emulation of PMEIE and RRS_SVE bits is provided */
|
||||
rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
|
||||
bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
|
||||
break;
|
||||
}
|
||||
@ -1075,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
|
||||
bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
|
||||
|
||||
/* Indicates supports for Completion Retry Status */
|
||||
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
|
||||
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);
|
||||
|
||||
bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
|
||||
bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
|
||||
@ -1141,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
||||
{
|
||||
struct advk_pcie *pcie = bus->sysdata;
|
||||
int retry_count;
|
||||
bool allow_crs;
|
||||
bool allow_rrs;
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
@ -1153,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
||||
size, val);
|
||||
|
||||
/*
|
||||
* Completion Retry Status is possible to return only when reading all
|
||||
* 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
|
||||
* CRSSVE flag on Root Bridge is enabled.
|
||||
* Configuration Request Retry Status (RRS) is possible to return
|
||||
* only when reading both bytes from PCI_VENDOR_ID at once and
|
||||
* RRS_SVE flag on Root Port is enabled.
|
||||
*/
|
||||
allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
|
||||
allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
|
||||
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
|
||||
PCI_EXP_RTCTL_CRSSVE);
|
||||
PCI_EXP_RTCTL_RRS_SVE);
|
||||
|
||||
if (advk_pcie_pio_is_running(pcie))
|
||||
goto try_crs;
|
||||
goto try_rrs;
|
||||
|
||||
/* Program the control register */
|
||||
reg = advk_readl(pcie, PIO_CTRL);
|
||||
@ -1189,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
||||
|
||||
ret = advk_pcie_wait_pio(pcie);
|
||||
if (ret < 0)
|
||||
goto try_crs;
|
||||
goto try_rrs;
|
||||
|
||||
retry_count += ret;
|
||||
|
||||
/* Check PIO status and get the read result */
|
||||
ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
|
||||
ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
|
||||
} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
|
||||
|
||||
if (ret < 0)
|
||||
@ -1207,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
|
||||
|
||||
return PCIBIOS_SUCCESSFUL;
|
||||
|
||||
try_crs:
|
||||
try_rrs:
|
||||
/*
|
||||
* If it is possible, return Completion Retry Status so that caller
|
||||
* tries to issue the request again instead of failing.
|
||||
* If it is possible, return Configuration Request Retry Status so
|
||||
* that caller tries to issue the request again instead of failing.
|
||||
*/
|
||||
if (allow_crs) {
|
||||
*val = CFG_RD_CRS_VAL;
|
||||
if (allow_rrs) {
|
||||
*val = CFG_RD_RRS_VAL;
|
||||
return PCIBIOS_SUCCESSFUL;
|
||||
}
|
||||
|
||||
@ -1304,12 +1304,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
|
||||
msg->data = data->hwirq;
|
||||
}
|
||||
|
||||
static int advk_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void advk_msi_irq_mask(struct irq_data *d)
|
||||
{
|
||||
struct advk_pcie *pcie = d->domain->host_data;
|
||||
@ -1353,7 +1347,6 @@ static void advk_msi_top_irq_unmask(struct irq_data *d)
|
||||
static struct irq_chip advk_msi_bottom_irq_chip = {
|
||||
.name = "MSI",
|
||||
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
|
||||
.irq_set_affinity = advk_msi_set_affinity,
|
||||
.irq_mask = advk_msi_irq_mask,
|
||||
.irq_unmask = advk_msi_irq_unmask,
|
||||
};
|
||||
@ -1451,7 +1444,8 @@ static struct irq_chip advk_msi_irq_chip = {
|
||||
|
||||
static struct msi_domain_info advk_msi_domain_info = {
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
|
||||
MSI_FLAG_PCI_MSIX,
|
||||
.chip = &advk_msi_irq_chip,
|
||||
};
|
||||
|
||||
|
@ -1629,11 +1629,6 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
|
||||
spin_unlock_irqrestore(&msi->mask_lock, flags);
|
||||
}
|
||||
|
||||
static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
{
|
||||
struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
|
||||
@ -1648,7 +1643,6 @@ static struct irq_chip tegra_msi_bottom_chip = {
|
||||
.irq_ack = tegra_msi_irq_ack,
|
||||
.irq_mask = tegra_msi_irq_mask,
|
||||
.irq_unmask = tegra_msi_irq_unmask,
|
||||
.irq_set_affinity = tegra_msi_set_affinity,
|
||||
.irq_compose_msi_msg = tegra_compose_msi_msg,
|
||||
};
|
||||
|
||||
@ -1697,8 +1691,8 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info tegra_msi_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
|
||||
.chip = &tegra_msi_top_chip,
|
||||
};
|
||||
|
||||
|
@ -171,17 +171,17 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
|
||||
|
||||
/*
|
||||
* The v1 controller has a bug in its Configuration Request Retry
|
||||
* Status (CRS) logic: when CRS Software Visibility is enabled and
|
||||
* Status (RRS) logic: when RRS Software Visibility is enabled and
|
||||
* we read the Vendor and Device ID of a non-existent device, the
|
||||
* controller fabricates return data of 0xFFFF0001 ("device exists
|
||||
* but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
|
||||
* ("device does not exist"). This causes the PCI core to retry
|
||||
* the read until it times out. Avoid this by not claiming to
|
||||
* support CRS SV.
|
||||
* support RRS SV.
|
||||
*/
|
||||
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
|
||||
((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
|
||||
*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
|
||||
*val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
|
||||
|
||||
if (size <= 2)
|
||||
*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
|
||||
|
@ -81,8 +81,8 @@ static struct irq_chip altera_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info altera_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
|
||||
.chip = &altera_msi_irq_chip,
|
||||
};
|
||||
|
||||
@ -99,16 +99,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
(int)data->hwirq, msg->address_hi, msg->address_lo);
|
||||
}
|
||||
|
||||
static int altera_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct irq_chip altera_msi_bottom_irq_chip = {
|
||||
.name = "Altera MSI",
|
||||
.irq_compose_msi_msg = altera_compose_msi_msg,
|
||||
.irq_set_affinity = altera_msi_set_affinity,
|
||||
};
|
||||
|
||||
static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
|
@ -55,12 +55,11 @@
|
||||
#define TLP_READ_TAG 0x1d
|
||||
#define TLP_WRITE_TAG 0x10
|
||||
#define RP_DEVFN 0
|
||||
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
|
||||
#define TLP_CFG_DW0(pcie, cfg) \
|
||||
(((cfg) << 24) | \
|
||||
TLP_PAYLOAD_SIZE)
|
||||
#define TLP_CFG_DW1(pcie, tag, be) \
|
||||
(((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
|
||||
(((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
|
||||
#define TLP_CFG_DW2(bus, devfn, offset) \
|
||||
(((bus) << 24) | ((devfn) << 16) | (offset))
|
||||
#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
|
||||
|
@ -75,15 +75,19 @@
|
||||
#define PCIE_MEM_WIN0_HI(win) \
|
||||
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
|
||||
|
||||
/*
|
||||
* NOTE: You may see the term "BAR" in a number of register names used by
|
||||
* this driver. The term is an artifact of when the HW core was an
|
||||
* endpoint device (EP). Now it is a root complex (RC) and anywhere a
|
||||
* register has the term "BAR" it is related to an inbound window.
|
||||
*/
|
||||
|
||||
#define PCIE_BRCM_MAX_INBOUND_WINS 16
|
||||
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
|
||||
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
|
||||
|
||||
#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
|
||||
#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
|
||||
#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
|
||||
#define PCIE_MISC_RC_BAR4_CONFIG_LO 0x40d4
|
||||
|
||||
#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
|
||||
#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
|
||||
|
||||
#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
|
||||
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
|
||||
@ -122,7 +126,6 @@
|
||||
#define PCIE_MEM_WIN0_LIMIT_HI(win) \
|
||||
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
|
||||
|
||||
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
|
||||
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
|
||||
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000
|
||||
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
|
||||
@ -131,9 +134,13 @@
|
||||
(PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
|
||||
PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
|
||||
|
||||
#define PCIE_INTR2_CPU_BASE 0x4300
|
||||
#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP 0x40ac
|
||||
#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK BIT(0)
|
||||
#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP 0x410c
|
||||
|
||||
#define PCIE_MSI_INTR2_BASE 0x4500
|
||||
/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
|
||||
|
||||
/* Offsets from INTR2_CPU and MSI_INTR2 BASE offsets */
|
||||
#define MSI_INT_STATUS 0x0
|
||||
#define MSI_INT_CLR 0x8
|
||||
#define MSI_INT_MASK_SET 0x10
|
||||
@ -184,9 +191,11 @@
|
||||
#define SSC_STATUS_PLL_LOCK_MASK 0x800
|
||||
#define PCIE_BRCM_MAX_MEMC 3
|
||||
|
||||
#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
|
||||
#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
|
||||
#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
|
||||
#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX])
|
||||
#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA])
|
||||
#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1])
|
||||
#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG])
|
||||
#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE])
|
||||
|
||||
/* Rescal registers */
|
||||
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
|
||||
@ -205,27 +214,33 @@ enum {
|
||||
RGR1_SW_INIT_1,
|
||||
EXT_CFG_INDEX,
|
||||
EXT_CFG_DATA,
|
||||
PCIE_HARD_DEBUG,
|
||||
PCIE_INTR2_CPU_BASE,
|
||||
};
|
||||
|
||||
enum {
|
||||
RGR1_SW_INIT_1_INIT_MASK,
|
||||
RGR1_SW_INIT_1_INIT_SHIFT,
|
||||
};
|
||||
|
||||
enum pcie_type {
|
||||
enum pcie_soc_base {
|
||||
GENERIC,
|
||||
BCM7425,
|
||||
BCM7435,
|
||||
BCM2711,
|
||||
BCM4908,
|
||||
BCM7278,
|
||||
BCM2711,
|
||||
BCM7425,
|
||||
BCM7435,
|
||||
BCM7712,
|
||||
};
|
||||
|
||||
struct inbound_win {
|
||||
u64 size;
|
||||
u64 pci_offset;
|
||||
u64 cpu_addr;
|
||||
};
|
||||
|
||||
struct pcie_cfg_data {
|
||||
const int *offsets;
|
||||
const enum pcie_type type;
|
||||
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
|
||||
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
|
||||
const enum pcie_soc_base soc_base;
|
||||
const bool has_phy;
|
||||
u8 num_inbound_wins;
|
||||
int (*perst_set)(struct brcm_pcie *pcie, u32 val);
|
||||
int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
|
||||
};
|
||||
|
||||
struct subdev_regulators {
|
||||
@ -262,21 +277,25 @@ struct brcm_pcie {
|
||||
u64 msi_target_addr;
|
||||
struct brcm_msi *msi;
|
||||
const int *reg_offsets;
|
||||
enum pcie_type type;
|
||||
enum pcie_soc_base soc_base;
|
||||
struct reset_control *rescal;
|
||||
struct reset_control *perst_reset;
|
||||
struct reset_control *bridge_reset;
|
||||
struct reset_control *swinit_reset;
|
||||
int num_memc;
|
||||
u64 memc_size[PCIE_BRCM_MAX_MEMC];
|
||||
u32 hw_rev;
|
||||
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
|
||||
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
|
||||
int (*perst_set)(struct brcm_pcie *pcie, u32 val);
|
||||
int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
|
||||
struct subdev_regulators *sr;
|
||||
bool ep_wakeup_capable;
|
||||
bool has_phy;
|
||||
u8 num_inbound_wins;
|
||||
};
|
||||
|
||||
static inline bool is_bmips(const struct brcm_pcie *pcie)
|
||||
{
|
||||
return pcie->type == BCM7435 || pcie->type == BCM7425;
|
||||
return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -394,7 +413,7 @@ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
|
||||
}
|
||||
|
||||
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
|
||||
unsigned int win, u64 cpu_addr,
|
||||
u8 win, u64 cpu_addr,
|
||||
u64 pcie_addr, u64 size)
|
||||
{
|
||||
u32 cpu_addr_mb_high, limit_addr_mb_high;
|
||||
@ -445,8 +464,8 @@ static struct irq_chip brcm_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info brcm_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_MULTI_PCI_MSI),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
|
||||
.chip = &brcm_msi_irq_chip,
|
||||
};
|
||||
|
||||
@ -484,12 +503,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
|
||||
}
|
||||
|
||||
static int brcm_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void brcm_msi_ack_irq(struct irq_data *data)
|
||||
{
|
||||
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
|
||||
@ -502,7 +515,6 @@ static void brcm_msi_ack_irq(struct irq_data *data)
|
||||
static struct irq_chip brcm_msi_bottom_irq_chip = {
|
||||
.name = "BRCM STB MSI",
|
||||
.irq_compose_msi_msg = brcm_msi_compose_msi_msg,
|
||||
.irq_set_affinity = brcm_msi_set_affinity,
|
||||
.irq_ack = brcm_msi_ack_irq,
|
||||
};
|
||||
|
||||
@ -649,7 +661,7 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
|
||||
BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
|
||||
|
||||
if (msi->legacy) {
|
||||
msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
|
||||
msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
|
||||
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
|
||||
msi->legacy_shift = 24;
|
||||
} else {
|
||||
@ -730,17 +742,33 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
|
||||
return base + DATA_ADDR(pcie);
|
||||
}
|
||||
|
||||
static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
|
||||
static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
|
||||
{
|
||||
u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
|
||||
u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
|
||||
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
|
||||
int ret = 0;
|
||||
|
||||
if (pcie->bridge_reset) {
|
||||
if (val)
|
||||
ret = reset_control_assert(pcie->bridge_reset);
|
||||
else
|
||||
ret = reset_control_deassert(pcie->bridge_reset);
|
||||
|
||||
if (ret)
|
||||
dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
|
||||
val ? "assert" : "deassert", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
|
||||
tmp = (tmp & ~mask) | ((val << shift) & mask);
|
||||
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
|
||||
static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
|
||||
{
|
||||
u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
|
||||
u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
|
||||
@ -748,20 +776,29 @@ static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
|
||||
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
|
||||
tmp = (tmp & ~mask) | ((val << shift) & mask);
|
||||
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
|
||||
static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
|
||||
return;
|
||||
return -EINVAL;
|
||||
|
||||
if (val)
|
||||
reset_control_assert(pcie->perst_reset);
|
||||
ret = reset_control_assert(pcie->perst_reset);
|
||||
else
|
||||
reset_control_deassert(pcie->perst_reset);
|
||||
ret = reset_control_deassert(pcie->perst_reset);
|
||||
|
||||
if (ret)
|
||||
dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
|
||||
val ? "assert" : "deassert", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
|
||||
static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
@ -769,34 +806,77 @@ static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
|
||||
tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
|
||||
u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
|
||||
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
|
||||
static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
|
||||
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
|
||||
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
|
||||
u64 *rc_bar2_size,
|
||||
u64 *rc_bar2_offset)
|
||||
static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
|
||||
u64 cpu_addr, u64 pci_offset)
|
||||
{
|
||||
b->size = size;
|
||||
b->cpu_addr = cpu_addr;
|
||||
b->pci_offset = pci_offset;
|
||||
(*count)++;
|
||||
}
|
||||
|
||||
static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
|
||||
struct inbound_win inbound_wins[])
|
||||
{
|
||||
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
|
||||
u64 pci_offset, cpu_addr, size = 0, tot_size = 0;
|
||||
struct resource_entry *entry;
|
||||
struct device *dev = pcie->dev;
|
||||
u64 lowest_pcie_addr = ~(u64)0;
|
||||
int ret, i = 0;
|
||||
u64 size = 0;
|
||||
u8 n = 0;
|
||||
|
||||
/*
|
||||
* The HW registers (and PCIe) use order-1 numbering for BARs. As such,
|
||||
* we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
|
||||
*/
|
||||
struct inbound_win *b_begin = &inbound_wins[1];
|
||||
struct inbound_win *b = b_begin;
|
||||
|
||||
/*
* STB chips other than the 7712 disable the first inbound window by
* default. Rather than being mapped to system memory, it is mapped to
* the internal registers of the SoC. This feature is deprecated, has
* security considerations, and is not implemented in our modern SoCs.
*/
if (pcie->soc_base != BCM7712)
add_inbound_win(b++, &n, 0, 0, 0);
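/* A zero size presumably leaves this first window disabled when the registers are written later. */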
|
||||
|
||||
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
|
||||
u64 pcie_beg = entry->res->start - entry->offset;
|
||||
u64 pcie_start = entry->res->start - entry->offset;
|
||||
u64 cpu_start = entry->res->start;
|
||||
|
||||
size += entry->res->end - entry->res->start + 1;
|
||||
if (pcie_beg < lowest_pcie_addr)
|
||||
lowest_pcie_addr = pcie_beg;
|
||||
size = resource_size(entry->res);
|
||||
tot_size += size;
|
||||
if (pcie_start < lowest_pcie_addr)
|
||||
lowest_pcie_addr = pcie_start;
|
||||
/*
* 7712 and newer chips may have many BARs, with each offering a
* non-overlapping viewport to system memory. That said, each BAR's
* size must still be a power of two.
*/
if (pcie->soc_base == BCM7712)
add_inbound_win(b++, &n, size, cpu_start, pcie_start);
|
||||
|
||||
if (n > pcie->num_inbound_wins)
|
||||
break;
|
||||
}
|
||||
|
||||
if (lowest_pcie_addr == ~(u64)0) {
|
||||
@ -804,13 +884,20 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
* 7712 and newer chips do not have an internal memory mapping system
* that enables multiple memory controllers. As such, we can return
* now without doing any special configuration.
*/
if (pcie->soc_base == BCM7712)
return n;
|
||||
|
||||
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
|
||||
PCIE_BRCM_MAX_MEMC);
|
||||
|
||||
if (ret <= 0) {
|
||||
/* Make an educated guess */
|
||||
pcie->num_memc = 1;
|
||||
pcie->memc_size[0] = 1ULL << fls64(size - 1);
|
||||
pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
|
||||
} else {
|
||||
pcie->num_memc = ret;
|
||||
}
|
||||
@ -819,10 +906,15 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
|
||||
for (i = 0, size = 0; i < pcie->num_memc; i++)
|
||||
size += pcie->memc_size[i];
|
||||
|
||||
/* System memory starts at this address in PCIe-space */
|
||||
*rc_bar2_offset = lowest_pcie_addr;
|
||||
/* The sum of all memc views must also be a power of 2 */
|
||||
*rc_bar2_size = 1ULL << fls64(size - 1);
|
||||
/* Our HW mandates that the window size must be a power of 2 */
|
||||
size = 1ULL << fls64(size - 1);
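/*
* 1ULL << fls64(size - 1) rounds up to the next power of two but leaves
* exact powers of two alone: e.g. a 3 GB total becomes a 4 GB window,
* while 4 GB stays 4 GB.
*/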
|
||||
|
||||
/*
|
||||
* For STB chips, the BAR2 cpu_addr is hardwired to the start
|
||||
* of system memory, so we set it to 0.
|
||||
*/
|
||||
cpu_addr = 0;
|
||||
pci_offset = lowest_pcie_addr;
|
||||
|
||||
/*
|
||||
* We validate the inbound memory view even though we should trust
|
||||
@ -857,44 +949,119 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
|
||||
* outbound memory @ 3GB). So instead it will start at the 1x
|
||||
* multiple of its size
|
||||
*/
|
||||
if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
|
||||
(*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
|
||||
dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
|
||||
*rc_bar2_size, *rc_bar2_offset);
|
||||
if (!size || (pci_offset & (size - 1)) ||
|
||||
(pci_offset < SZ_4G && pci_offset > SZ_2G)) {
|
||||
dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
|
||||
size, pci_offset);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
/* Enable inbound window 2, the main inbound window for STB chips */
|
||||
add_inbound_win(b++, &n, size, cpu_addr, pci_offset);
|
||||
|
||||
/*
* Disable inbound window 3. On some chips it presents the same
* window as #2, but the data appears in a settable endianness.
*/
add_inbound_win(b++, &n, 0, 0, 0);
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static u32 brcm_bar_reg_offset(int bar)
|
||||
{
|
||||
if (bar <= 3)
|
||||
return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
|
||||
else
|
||||
return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
|
||||
}
|
||||
|
||||
static u32 brcm_ubus_reg_offset(int bar)
|
||||
{
|
||||
if (bar <= 3)
|
||||
return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
|
||||
else
|
||||
return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
|
||||
}
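/*
* With the definitions above these presumably resolve to 0x402c/0x4034/
* 0x403c for RC BAR1-3 and 0x40d4 for RC BAR4, and to the UBUS remap
* registers starting at 0x40ac (BAR1) and 0x410c (BAR4), each with an
* 8-byte stride per BAR.
*/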
|
||||
|
||||
static void set_inbound_win_registers(struct brcm_pcie *pcie,
|
||||
const struct inbound_win *inbound_wins,
|
||||
u8 num_inbound_wins)
|
||||
{
|
||||
void __iomem *base = pcie->base;
|
||||
int i;
|
||||
|
||||
for (i = 1; i <= num_inbound_wins; i++) {
|
||||
u64 pci_offset = inbound_wins[i].pci_offset;
|
||||
u64 cpu_addr = inbound_wins[i].cpu_addr;
|
||||
u64 size = inbound_wins[i].size;
|
||||
u32 reg_offset = brcm_bar_reg_offset(i);
|
||||
u32 tmp = lower_32_bits(pci_offset);
|
||||
|
||||
u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
|
||||
PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);
|
||||
|
||||
/* Write low */
|
||||
writel_relaxed(tmp, base + reg_offset);
|
||||
/* Write high */
|
||||
writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);
|
||||
|
||||
/*
|
||||
* Most STB chips:
|
||||
* Do nothing.
|
||||
* 7712:
|
||||
* All of their BARs need to be set.
|
||||
*/
|
||||
if (pcie->soc_base == BCM7712) {
|
||||
/* BUS remap register settings */
|
||||
reg_offset = brcm_ubus_reg_offset(i);
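/*
* The low 12 bits of the CPU address are replaced by control bits, so
* the window target is presumably expected to be 4 KB aligned; bit 0
* re-enables access through the window.
*/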
|
||||
tmp = lower_32_bits(cpu_addr) & ~0xfff;
|
||||
tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
|
||||
writel_relaxed(tmp, base + reg_offset);
|
||||
tmp = upper_32_bits(cpu_addr);
|
||||
writel_relaxed(tmp, base + reg_offset + 4);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int brcm_pcie_setup(struct brcm_pcie *pcie)
|
||||
{
|
||||
u64 rc_bar2_offset, rc_bar2_size;
|
||||
struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
|
||||
void __iomem *base = pcie->base;
|
||||
struct pci_host_bridge *bridge;
|
||||
struct resource_entry *entry;
|
||||
u32 tmp, burst, aspm_support;
|
||||
int num_out_wins = 0;
|
||||
int ret, memc;
|
||||
u8 num_out_wins = 0;
|
||||
int num_inbound_wins = 0;
|
||||
int memc, ret;
|
||||
|
||||
/* Reset the bridge */
|
||||
pcie->bridge_sw_init_set(pcie, 1);
|
||||
ret = pcie->bridge_sw_init_set(pcie, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
|
||||
if (pcie->type == BCM2711)
|
||||
pcie->perst_set(pcie, 1);
|
||||
if (pcie->soc_base == BCM2711) {
|
||||
ret = pcie->perst_set(pcie, 1);
|
||||
if (ret) {
|
||||
pcie->bridge_sw_init_set(pcie, 0);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
usleep_range(100, 200);
|
||||
|
||||
/* Take the bridge out of reset */
|
||||
pcie->bridge_sw_init_set(pcie, 0);
|
||||
ret = pcie->bridge_sw_init_set(pcie, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
|
||||
tmp = readl(base + HARD_DEBUG(pcie));
|
||||
if (is_bmips(pcie))
|
||||
tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
|
||||
else
|
||||
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
|
||||
writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
|
||||
writel(tmp, base + HARD_DEBUG(pcie));
|
||||
/* Wait for SerDes to be stable */
|
||||
usleep_range(100, 200);
|
||||
|
||||
@ -905,9 +1072,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
|
||||
*/
|
||||
if (is_bmips(pcie))
|
||||
burst = 0x1; /* 256 bytes */
|
||||
else if (pcie->type == BCM2711)
|
||||
else if (pcie->soc_base == BCM2711)
|
||||
burst = 0x0; /* 128 bytes */
|
||||
else if (pcie->type == BCM7278)
|
||||
else if (pcie->soc_base == BCM7278)
|
||||
burst = 0x3; /* 512 bytes */
|
||||
else
|
||||
burst = 0x2; /* 512 bytes */
|
||||
@ -924,17 +1091,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
|
||||
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
|
||||
writel(tmp, base + PCIE_MISC_MISC_CTRL);
|
||||
|
||||
ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
|
||||
&rc_bar2_offset);
|
||||
if (ret)
|
||||
return ret;
|
||||
num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
|
||||
if (num_inbound_wins < 0)
|
||||
return num_inbound_wins;
|
||||
|
||||
tmp = lower_32_bits(rc_bar2_offset);
|
||||
u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
|
||||
PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
|
||||
writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
|
||||
writel(upper_32_bits(rc_bar2_offset),
|
||||
base + PCIE_MISC_RC_BAR2_CONFIG_HI);
|
||||
set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);
|
||||
|
||||
if (!brcm_pcie_rc_mode(pcie)) {
|
||||
dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tmp = readl(base + PCIE_MISC_MISC_CTRL);
|
||||
for (memc = 0; memc < pcie->num_memc; memc++) {
|
||||
@ -956,25 +1122,12 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
|
||||
* 4GB or when the inbound area is smaller than 4GB (taking into
|
||||
* account the rounding-up we're forced to perform).
|
||||
*/
|
||||
if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
|
||||
if (inbound_wins[2].pci_offset >= SZ_4G ||
|
||||
(inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
|
||||
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
|
||||
else
|
||||
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
|
||||
|
||||
if (!brcm_pcie_rc_mode(pcie)) {
|
||||
dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* disable the PCIe->GISB memory window (RC_BAR1) */
|
||||
tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
|
||||
tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
|
||||
writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
|
||||
|
||||
/* disable the PCIe->SCB memory window (RC_BAR3) */
|
||||
tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
|
||||
tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
|
||||
writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
|
||||
|
||||
/* Don't advertise L0s capability if 'aspm-no-l0s' */
|
||||
aspm_support = PCIE_LINK_STATE_L1;
|
||||
@ -1025,7 +1178,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
|
||||
num_out_wins++;
|
||||
}
|
||||
|
||||
/* PCIe->SCB endian mode for BAR */
|
||||
/* PCIe->SCB endian mode for inbound window */
|
||||
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
|
||||
u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
|
||||
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
|
||||
@ -1045,6 +1198,10 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
|
||||
const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
|
||||
u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
|
||||
|
||||
/* 7712 does not have this (RGR1) timer */
|
||||
if (pcie->soc_base == BCM7712)
|
||||
return;
|
||||
|
||||
/* Each unit in timeout register is 1/216,000,000 seconds */
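/*
* That is 216 units per microsecond, so the 4 s value above becomes
* 216 * 4,000,000 = 864,000,000 units, which still fits in the 32-bit
* register.
*/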
|
||||
writel(216 * timeout_us, pcie->base + REG_OFFSET);
|
||||
}
|
||||
@ -1063,7 +1220,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
|
||||
}
|
||||
|
||||
/* Start out assuming safe mode (both mode bits cleared) */
|
||||
clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
|
||||
clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
|
||||
clkreq_cntl &= ~PCIE_CLKREQ_MASK;
|
||||
|
||||
if (strcmp(mode, "no-l1ss") == 0) {
|
||||
@ -1106,7 +1263,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
|
||||
dev_err(pcie->dev, err_msg);
|
||||
mode = "safe";
|
||||
}
|
||||
writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
|
||||
writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));
|
||||
|
||||
dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
|
||||
}
|
||||
@ -1120,7 +1277,9 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
|
||||
int ret, i;
|
||||
|
||||
/* Unassert the fundamental reset */
|
||||
pcie->perst_set(pcie, 0);
|
||||
ret = pcie->perst_set(pcie, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* Wait for 100ms after PERST# deassertion; see PCIe CEM specification
|
||||
@ -1304,23 +1463,25 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
|
||||
|
||||
static inline int brcm_phy_start(struct brcm_pcie *pcie)
|
||||
{
|
||||
return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
|
||||
return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
|
||||
}
|
||||
|
||||
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
|
||||
{
|
||||
return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
|
||||
return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
|
||||
}
|
||||
|
||||
static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
|
||||
static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
|
||||
{
|
||||
void __iomem *base = pcie->base;
|
||||
int tmp;
|
||||
int tmp, ret;
|
||||
|
||||
if (brcm_pcie_link_up(pcie))
|
||||
brcm_pcie_enter_l23(pcie);
|
||||
/* Assert fundamental reset */
|
||||
pcie->perst_set(pcie, 1);
|
||||
ret = pcie->perst_set(pcie, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Deassert request for L23 in case it was asserted */
|
||||
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
|
||||
@ -1328,12 +1489,14 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
|
||||
writel(tmp, base + PCIE_MISC_PCIE_CTRL);
|
||||
|
||||
/* Turn off SerDes */
|
||||
tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
|
||||
tmp = readl(base + HARD_DEBUG(pcie));
|
||||
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
|
||||
writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
|
||||
writel(tmp, base + HARD_DEBUG(pcie));
|
||||
|
||||
/* Shutdown PCIe bridge */
|
||||
pcie->bridge_sw_init_set(pcie, 1);
|
||||
ret = pcie->bridge_sw_init_set(pcie, 1);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
|
||||
@ -1351,9 +1514,12 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
|
||||
{
|
||||
struct brcm_pcie *pcie = dev_get_drvdata(dev);
|
||||
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
|
||||
int ret;
|
||||
int ret, rret;
|
||||
|
||||
ret = brcm_pcie_turn_off(pcie);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
brcm_pcie_turn_off(pcie);
|
||||
/*
|
||||
* If brcm_phy_stop() returns an error, just dev_err(). If we
|
||||
* return the error it will cause the suspend to fail and this is a
|
||||
@ -1382,7 +1548,10 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
|
||||
pcie->sr->supplies);
|
||||
if (ret) {
|
||||
dev_err(dev, "Could not turn off regulators\n");
|
||||
reset_control_reset(pcie->rescal);
|
||||
rret = reset_control_reset(pcie->rescal);
|
||||
if (rret)
|
||||
dev_err(dev, "failed to reset 'rascal' controller ret=%d\n",
|
||||
rret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -1397,7 +1566,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
|
||||
struct brcm_pcie *pcie = dev_get_drvdata(dev);
|
||||
void __iomem *base;
|
||||
u32 tmp;
|
||||
int ret;
|
||||
int ret, rret;
|
||||
|
||||
base = pcie->base;
|
||||
ret = clk_prepare_enable(pcie->clk);
|
||||
@ -1416,9 +1585,9 @@ static int brcm_pcie_resume_noirq(struct device *dev)
|
||||
pcie->bridge_sw_init_set(pcie, 0);
|
||||
|
||||
/* SERDES_IDDQ = 0 */
|
||||
tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
|
||||
tmp = readl(base + HARD_DEBUG(pcie));
|
||||
u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
|
||||
writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
|
||||
writel(tmp, base + HARD_DEBUG(pcie));
|
||||
|
||||
/* wait for serdes to be stable */
|
||||
udelay(100);
|
||||
@ -1459,7 +1628,9 @@ static int brcm_pcie_resume_noirq(struct device *dev)
|
||||
if (pcie->sr)
|
||||
regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
|
||||
err_reset:
|
||||
reset_control_rearm(pcie->rescal);
|
||||
rret = reset_control_rearm(pcie->rescal);
|
||||
if (rret)
|
||||
dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
|
||||
err_disable_clk:
|
||||
clk_disable_unprepare(pcie->clk);
|
||||
return ret;
|
||||
@ -1487,74 +1658,111 @@ static void brcm_pcie_remove(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
static const int pcie_offsets[] = {
|
||||
[RGR1_SW_INIT_1] = 0x9210,
|
||||
[EXT_CFG_INDEX] = 0x9000,
|
||||
[EXT_CFG_DATA] = 0x9004,
|
||||
[RGR1_SW_INIT_1] = 0x9210,
|
||||
[EXT_CFG_INDEX] = 0x9000,
|
||||
[EXT_CFG_DATA] = 0x9004,
|
||||
[PCIE_HARD_DEBUG] = 0x4204,
|
||||
[PCIE_INTR2_CPU_BASE] = 0x4300,
|
||||
};
|
||||
|
||||
static const int pcie_offsets_bmips_7425[] = {
|
||||
[RGR1_SW_INIT_1] = 0x8010,
|
||||
[EXT_CFG_INDEX] = 0x8300,
|
||||
[EXT_CFG_DATA] = 0x8304,
|
||||
static const int pcie_offsets_bcm7278[] = {
|
||||
[RGR1_SW_INIT_1] = 0xc010,
|
||||
[EXT_CFG_INDEX] = 0x9000,
|
||||
[EXT_CFG_DATA] = 0x9004,
|
||||
[PCIE_HARD_DEBUG] = 0x4204,
|
||||
[PCIE_INTR2_CPU_BASE] = 0x4300,
|
||||
};
|
||||
|
||||
static const int pcie_offsets_bcm7425[] = {
|
||||
[RGR1_SW_INIT_1] = 0x8010,
|
||||
[EXT_CFG_INDEX] = 0x8300,
|
||||
[EXT_CFG_DATA] = 0x8304,
|
||||
[PCIE_HARD_DEBUG] = 0x4204,
|
||||
[PCIE_INTR2_CPU_BASE] = 0x4300,
|
||||
};
|
||||
|
||||
static const int pcie_offsets_bcm7712[] = {
|
||||
[EXT_CFG_INDEX] = 0x9000,
|
||||
[EXT_CFG_DATA] = 0x9004,
|
||||
[PCIE_HARD_DEBUG] = 0x4304,
|
||||
[PCIE_INTR2_CPU_BASE] = 0x4400,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data generic_cfg = {
|
||||
.offsets = pcie_offsets,
|
||||
.type = GENERIC,
|
||||
.soc_base = GENERIC,
|
||||
.perst_set = brcm_pcie_perst_set_generic,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm7425_cfg = {
|
||||
.offsets = pcie_offsets_bmips_7425,
|
||||
.type = BCM7425,
|
||||
.perst_set = brcm_pcie_perst_set_generic,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm7435_cfg = {
|
||||
.offsets = pcie_offsets,
|
||||
.type = BCM7435,
|
||||
.perst_set = brcm_pcie_perst_set_generic,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm4908_cfg = {
|
||||
.offsets = pcie_offsets,
|
||||
.type = BCM4908,
|
||||
.perst_set = brcm_pcie_perst_set_4908,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
};
|
||||
|
||||
static const int pcie_offset_bcm7278[] = {
|
||||
[RGR1_SW_INIT_1] = 0xc010,
|
||||
[EXT_CFG_INDEX] = 0x9000,
|
||||
[EXT_CFG_DATA] = 0x9004,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm7278_cfg = {
|
||||
.offsets = pcie_offset_bcm7278,
|
||||
.type = BCM7278,
|
||||
.perst_set = brcm_pcie_perst_set_7278,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
|
||||
.num_inbound_wins = 3,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm2711_cfg = {
|
||||
.offsets = pcie_offsets,
|
||||
.type = BCM2711,
|
||||
.soc_base = BCM2711,
|
||||
.perst_set = brcm_pcie_perst_set_generic,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
.num_inbound_wins = 3,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm4908_cfg = {
|
||||
.offsets = pcie_offsets,
|
||||
.soc_base = BCM4908,
|
||||
.perst_set = brcm_pcie_perst_set_4908,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
.num_inbound_wins = 3,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm7278_cfg = {
|
||||
.offsets = pcie_offsets_bcm7278,
|
||||
.soc_base = BCM7278,
|
||||
.perst_set = brcm_pcie_perst_set_7278,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
|
||||
.num_inbound_wins = 3,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm7425_cfg = {
|
||||
.offsets = pcie_offsets_bcm7425,
|
||||
.soc_base = BCM7425,
|
||||
.perst_set = brcm_pcie_perst_set_generic,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
.num_inbound_wins = 3,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm7435_cfg = {
|
||||
.offsets = pcie_offsets,
|
||||
.soc_base = BCM7435,
|
||||
.perst_set = brcm_pcie_perst_set_generic,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
.num_inbound_wins = 3,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm7216_cfg = {
|
||||
.offsets = pcie_offsets_bcm7278,
|
||||
.soc_base = BCM7278,
|
||||
.perst_set = brcm_pcie_perst_set_7278,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
|
||||
.has_phy = true,
|
||||
.num_inbound_wins = 3,
|
||||
};
|
||||
|
||||
static const struct pcie_cfg_data bcm7712_cfg = {
|
||||
.offsets = pcie_offsets_bcm7712,
|
||||
.perst_set = brcm_pcie_perst_set_7278,
|
||||
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
|
||||
.soc_base = BCM7712,
|
||||
.num_inbound_wins = 10,
|
||||
};
|
||||
|
||||
static const struct of_device_id brcm_pcie_match[] = {
|
||||
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
|
||||
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
|
||||
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
|
||||
{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
|
||||
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
|
||||
{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
|
||||
{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
|
||||
{ .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
|
||||
{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
|
||||
{ .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
|
||||
{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
|
||||
{ .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
|
||||
{},
|
||||
};
|
||||
|
||||
@ -1596,9 +1804,11 @@ static int brcm_pcie_probe(struct platform_device *pdev)
|
||||
pcie->dev = &pdev->dev;
|
||||
pcie->np = np;
|
||||
pcie->reg_offsets = data->offsets;
|
||||
pcie->type = data->type;
|
||||
pcie->soc_base = data->soc_base;
|
||||
pcie->perst_set = data->perst_set;
|
||||
pcie->bridge_sw_init_set = data->bridge_sw_init_set;
|
||||
pcie->has_phy = data->has_phy;
|
||||
pcie->num_inbound_wins = data->num_inbound_wins;
|
||||
|
||||
pcie->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(pcie->base))
|
||||
@ -1613,25 +1823,52 @@ static int brcm_pcie_probe(struct platform_device *pdev)
|
||||
|
||||
pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
|
||||
|
||||
ret = clk_prepare_enable(pcie->clk);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "could not enable clock\n");
|
||||
return ret;
|
||||
}
|
||||
pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
|
||||
if (IS_ERR(pcie->rescal)) {
|
||||
clk_disable_unprepare(pcie->clk);
|
||||
if (IS_ERR(pcie->rescal))
|
||||
return PTR_ERR(pcie->rescal);
|
||||
}
|
||||
|
||||
pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
|
||||
if (IS_ERR(pcie->perst_reset)) {
|
||||
clk_disable_unprepare(pcie->clk);
|
||||
if (IS_ERR(pcie->perst_reset))
|
||||
return PTR_ERR(pcie->perst_reset);
|
||||
|
||||
pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
|
||||
if (IS_ERR(pcie->bridge_reset))
|
||||
return PTR_ERR(pcie->bridge_reset);
|
||||
|
||||
pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
|
||||
if (IS_ERR(pcie->swinit_reset))
|
||||
return PTR_ERR(pcie->swinit_reset);
|
||||
|
||||
ret = clk_prepare_enable(pcie->clk);
|
||||
if (ret)
|
||||
return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
|
||||
|
||||
pcie->bridge_sw_init_set(pcie, 0);
|
||||
|
||||
if (pcie->swinit_reset) {
|
||||
ret = reset_control_assert(pcie->swinit_reset);
|
||||
if (ret) {
|
||||
clk_disable_unprepare(pcie->clk);
|
||||
return dev_err_probe(&pdev->dev, ret,
|
||||
"could not assert reset 'swinit'\n");
|
||||
}
|
||||
|
||||
/* HW team recommends 1us for proper sync and propagation of reset */
|
||||
udelay(1);
|
||||
|
||||
ret = reset_control_deassert(pcie->swinit_reset);
|
||||
if (ret) {
|
||||
clk_disable_unprepare(pcie->clk);
|
||||
return dev_err_probe(&pdev->dev, ret,
|
||||
"could not de-assert reset 'swinit'\n");
|
||||
}
|
||||
}
|
||||
|
||||
ret = reset_control_reset(pcie->rescal);
|
||||
if (ret)
|
||||
dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
|
||||
if (ret) {
|
||||
clk_disable_unprepare(pcie->clk);
|
||||
return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
|
||||
}
|
||||
|
||||
ret = brcm_phy_start(pcie);
|
||||
if (ret) {
|
||||
@ -1645,7 +1882,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
|
||||
goto fail;
|
||||
|
||||
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
|
||||
if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
|
||||
if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
|
||||
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
|
||||
ret = -ENODEV;
|
||||
goto fail;
|
||||
@ -1660,7 +1897,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
|
||||
}
|
||||
}
|
||||
|
||||
bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
|
||||
bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
|
||||
bridge->sysdata = pcie;
|
||||
|
||||
platform_set_drvdata(pdev, pcie);
|
||||
@ -1678,6 +1915,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
|
||||
|
||||
fail:
|
||||
__brcm_pcie_remove(pcie);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -54,7 +54,7 @@
|
||||
|
||||
#define CFG_RD_SUCCESS 0
|
||||
#define CFG_RD_UR 1
|
||||
#define CFG_RD_CRS 2
|
||||
#define CFG_RD_RRS 2
|
||||
#define CFG_RD_CA 3
|
||||
#define CFG_RETRY_STATUS 0xffff0001
|
||||
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
|
||||
@ -485,31 +485,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
|
||||
u32 status;
|
||||
|
||||
/*
|
||||
* As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
|
||||
* As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only
|
||||
* affects config reads of the Vendor ID. For config writes or any
|
||||
* other config reads, the Root may automatically reissue the
|
||||
* configuration request again as a new request.
|
||||
*
|
||||
* For config reads, this hardware returns CFG_RETRY_STATUS data
|
||||
* when it receives a CRS completion, regardless of the address of
|
||||
* the read or the CRS Software Visibility Enable bit. As a
|
||||
* when it receives a RRS completion, regardless of the address of
|
||||
* the read or the RRS Software Visibility Enable bit. As a
|
||||
* partial workaround for this, we retry in software any read that
|
||||
* returns CFG_RETRY_STATUS.
|
||||
*
|
||||
* Note that a non-Vendor ID config register may have a value of
|
||||
* CFG_RETRY_STATUS. If we read that, we can't distinguish it from
|
||||
* a CRS completion, so we will incorrectly retry the read and
|
||||
* a RRS completion, so we will incorrectly retry the read and
|
||||
* eventually return the wrong data (0xffffffff).
|
||||
*/
|
||||
data = readl(cfg_data_p);
|
||||
while (data == CFG_RETRY_STATUS && timeout--) {
|
||||
/*
|
||||
* CRS state is set in CFG_RD status register
|
||||
* RRS state is set in CFG_RD status register
|
||||
* This will handle the case where CFG_RETRY_STATUS is
|
||||
* valid config data.
|
||||
*/
|
||||
status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
|
||||
if (status != CFG_RD_CRS)
|
||||
if (status != CFG_RD_RRS)
|
||||
return data;
|
||||
|
||||
udelay(1);
|
||||
@ -556,8 +556,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
|
||||
break;
|
||||
|
||||
case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
|
||||
/* Don't advertise CRS SV support */
|
||||
*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
|
||||
/* Don't advertise RRS SV support */
|
||||
*val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@@ -6,7 +6,9 @@
|
||||
* Author: Jianjun Wang <jianjun.wang@mediatek.com>
|
||||
*/
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/irq.h>
|
||||
@ -15,6 +17,8 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_pci.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/phy/phy.h>
|
||||
#include <linux/platform_device.h>
|
||||
@ -29,6 +33,12 @@
|
||||
#define PCI_CLASS(class) (class << 8)
|
||||
#define PCIE_RC_MODE BIT(0)
|
||||
|
||||
#define PCIE_EQ_PRESET_01_REG 0x100
|
||||
#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
|
||||
#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8)
|
||||
#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16)
|
||||
#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24)
|
||||
|
||||
#define PCIE_CFGNUM_REG 0x140
|
||||
#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
|
||||
#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
|
||||
@ -68,6 +78,14 @@
|
||||
#define PCIE_MSI_SET_ENABLE_REG 0x190
|
||||
#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
|
||||
|
||||
#define PCIE_PIPE4_PIE8_REG 0x338
|
||||
#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
|
||||
#define PCIE_K_FINETUNE_ERR GENMASK(7, 6)
|
||||
#define PCIE_K_PRESET_TO_USE GENMASK(18, 8)
|
||||
#define PCIE_K_PHYPARAM_QUERY BIT(19)
|
||||
#define PCIE_K_QUERY_TIMEOUT BIT(20)
|
||||
#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21)
|
||||
|
||||
#define PCIE_MSI_SET_BASE_REG 0xc00
|
||||
#define PCIE_MSI_SET_OFFSET 0x10
|
||||
#define PCIE_MSI_SET_STATUS_OFFSET 0x04
|
||||
@ -100,6 +118,26 @@
|
||||
#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
|
||||
#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
|
||||
|
||||
#define MAX_NUM_PHY_RESETS 3
|
||||
|
||||
/* Time in ms needed to complete PCIe reset on EN7581 SoC */
|
||||
#define PCIE_EN7581_RESET_TIME_MS 100
|
||||
|
||||
struct mtk_gen3_pcie;
|
||||
|
||||
/**
|
||||
* struct mtk_gen3_pcie_pdata - differentiate between host generations
|
||||
* @power_up: pcie power_up callback
|
||||
* @phy_resets: phy reset lines SoC data.
|
||||
*/
|
||||
struct mtk_gen3_pcie_pdata {
|
||||
int (*power_up)(struct mtk_gen3_pcie *pcie);
|
||||
struct {
|
||||
const char *id[MAX_NUM_PHY_RESETS];
|
||||
int num_resets;
|
||||
} phy_resets;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct mtk_msi_set - MSI information for each set
|
||||
* @base: IO mapped register base
|
||||
@ -118,7 +156,7 @@ struct mtk_msi_set {
|
||||
* @base: IO mapped register base
|
||||
* @reg_base: physical register base
|
||||
* @mac_reset: MAC reset control
|
||||
* @phy_reset: PHY reset control
|
||||
* @phy_resets: PHY reset controllers
|
||||
* @phy: PHY controller block
|
||||
* @clks: PCIe clocks
|
||||
* @num_clks: PCIe clocks count for this port
|
||||
@ -131,13 +169,14 @@ struct mtk_msi_set {
|
||||
* @msi_sets: MSI sets information
|
||||
* @lock: lock protecting IRQ bit map
|
||||
* @msi_irq_in_use: bit map for assigned MSI IRQ
|
||||
* @soc: pointer to SoC-dependent operations
|
||||
*/
|
||||
struct mtk_gen3_pcie {
|
||||
struct device *dev;
|
||||
void __iomem *base;
|
||||
phys_addr_t reg_base;
|
||||
struct reset_control *mac_reset;
|
||||
struct reset_control *phy_reset;
|
||||
struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
|
||||
struct phy *phy;
|
||||
struct clk_bulk_data *clks;
|
||||
int num_clks;
|
||||
@ -151,6 +190,8 @@ struct mtk_gen3_pcie {
|
||||
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
|
||||
struct mutex lock;
|
||||
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
|
||||
|
||||
const struct mtk_gen3_pcie_pdata *soc;
|
||||
};
|
||||
|
||||
/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
|
||||
@ -424,12 +465,6 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mtk_pcie_set_affinity(struct irq_data *data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
|
||||
{
|
||||
pci_msi_mask_irq(data);
|
||||
@ -450,8 +485,9 @@ static struct irq_chip mtk_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info mtk_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
|
||||
MSI_FLAG_MULTI_PCI_MSI,
|
||||
.chip = &mtk_msi_irq_chip,
|
||||
};
|
||||
|
||||
@ -517,7 +553,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = {
|
||||
.irq_mask = mtk_msi_bottom_irq_mask,
|
||||
.irq_unmask = mtk_msi_bottom_irq_unmask,
|
||||
.irq_compose_msi_msg = mtk_compose_msi_msg,
|
||||
.irq_set_affinity = mtk_pcie_set_affinity,
|
||||
.name = "MSI",
|
||||
};
|
||||
|
||||
@ -618,7 +653,6 @@ static struct irq_chip mtk_intx_irq_chip = {
|
||||
.irq_mask = mtk_intx_mask,
|
||||
.irq_unmask = mtk_intx_unmask,
|
||||
.irq_eoi = mtk_intx_eoi,
|
||||
.irq_set_affinity = mtk_pcie_set_affinity,
|
||||
.name = "INTx",
|
||||
};
|
||||
|
||||
@ -775,10 +809,10 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
|
||||
|
||||
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
|
||||
{
|
||||
int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
|
||||
struct device *dev = pcie->dev;
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct resource *regs;
|
||||
int ret;
|
||||
|
||||
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
|
||||
if (!regs)
|
||||
@ -791,12 +825,12 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
|
||||
|
||||
pcie->reg_base = regs->start;
|
||||
|
||||
pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
|
||||
if (IS_ERR(pcie->phy_reset)) {
|
||||
ret = PTR_ERR(pcie->phy_reset);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(dev, "failed to get PHY reset\n");
|
||||
for (i = 0; i < num_resets; i++)
|
||||
pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
|
||||
|
||||
ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to get PHY bulk reset\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -827,13 +861,96 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
|
||||
{
|
||||
struct device *dev = pcie->dev;
|
||||
int err;
|
||||
u32 val;
|
||||
|
||||
/*
|
||||
* Wait for the time needed to complete the bulk assert in
|
||||
* mtk_pcie_setup for EN7581 SoC.
|
||||
*/
|
||||
mdelay(PCIE_EN7581_RESET_TIME_MS);
|
||||
|
||||
err = phy_init(pcie->phy);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to initialize PHY\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
err = phy_power_on(pcie->phy);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to power on PHY\n");
|
||||
goto err_phy_on;
|
||||
}
|
||||
|
||||
err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to deassert PHYs\n");
|
||||
goto err_phy_deassert;
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for the time needed to complete the bulk de-assert above.
|
||||
* This time is specific for EN7581 SoC.
|
||||
*/
|
||||
mdelay(PCIE_EN7581_RESET_TIME_MS);
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
pm_runtime_get_sync(dev);
|
||||
|
||||
err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to prepare clock\n");
|
||||
goto err_clk_prepare;
|
||||
}
|
||||
|
||||
val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
|
||||
FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
|
||||
FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
|
||||
FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
|
||||
writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
|
||||
|
||||
val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
|
||||
FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
|
||||
FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
|
||||
FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
|
||||
writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
|
||||
|
||||
err = clk_bulk_enable(pcie->num_clks, pcie->clks);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to prepare clock\n");
|
||||
goto err_clk_enable;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_clk_enable:
|
||||
clk_bulk_unprepare(pcie->num_clks, pcie->clks);
|
||||
err_clk_prepare:
|
||||
pm_runtime_put_sync(dev);
|
||||
pm_runtime_disable(dev);
|
||||
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
|
||||
err_phy_deassert:
|
||||
phy_power_off(pcie->phy);
|
||||
err_phy_on:
|
||||
phy_exit(pcie->phy);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
|
||||
{
|
||||
struct device *dev = pcie->dev;
|
||||
int err;
|
||||
|
||||
/* PHY power on and enable pipe clock */
|
||||
reset_control_deassert(pcie->phy_reset);
|
||||
err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
|
||||
if (err) {
|
||||
dev_err(dev, "failed to deassert PHYs\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
err = phy_init(pcie->phy);
|
||||
if (err) {
|
||||
@ -869,7 +986,7 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
|
||||
err_phy_on:
|
||||
phy_exit(pcie->phy);
|
||||
err_phy_init:
|
||||
reset_control_assert(pcie->phy_reset);
|
||||
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -884,7 +1001,7 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
|
||||
|
||||
phy_power_off(pcie->phy);
|
||||
phy_exit(pcie->phy);
|
||||
reset_control_assert(pcie->phy_reset);
|
||||
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
|
||||
}
|
||||
|
||||
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
|
||||
@ -895,16 +1012,22 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/*
|
||||
* Deassert the line to avoid an unbalanced deassert_count, since
* the bulk reset control is shared.
|
||||
*/
|
||||
reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
|
||||
/*
|
||||
* The controller may have been left out of reset by the bootloader
|
||||
* so make sure that we get a clean start by asserting resets here.
|
||||
*/
|
||||
reset_control_assert(pcie->phy_reset);
|
||||
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
|
||||
|
||||
reset_control_assert(pcie->mac_reset);
|
||||
usleep_range(10, 20);
|
||||
|
||||
/* Don't touch the hardware registers before power up */
|
||||
err = mtk_pcie_power_up(pcie);
|
||||
err = pcie->soc->power_up(pcie);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -939,6 +1062,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
|
||||
pcie = pci_host_bridge_priv(host);
|
||||
|
||||
pcie->dev = dev;
|
||||
pcie->soc = device_get_match_data(dev);
|
||||
platform_set_drvdata(pdev, pcie);
|
||||
|
||||
err = mtk_pcie_setup(pcie);
|
||||
@ -1054,7 +1178,7 @@ static int mtk_pcie_resume_noirq(struct device *dev)
|
||||
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
|
||||
int err;
|
||||
|
||||
err = mtk_pcie_power_up(pcie);
|
||||
err = pcie->soc->power_up(pcie);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -1074,8 +1198,27 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
|
||||
mtk_pcie_resume_noirq)
|
||||
};
|
||||
|
||||
static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
|
||||
.power_up = mtk_pcie_power_up,
|
||||
.phy_resets = {
|
||||
.id[0] = "phy",
|
||||
.num_resets = 1,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
|
||||
.power_up = mtk_pcie_en7581_power_up,
|
||||
.phy_resets = {
|
||||
.id[0] = "phy-lane0",
|
||||
.id[1] = "phy-lane1",
|
||||
.id[2] = "phy-lane2",
|
||||
.num_resets = 3,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct of_device_id mtk_pcie_of_match[] = {
|
||||
{ .compatible = "mediatek,mt8192-pcie" },
|
||||
{ .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
|
||||
{ .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
|
||||
|
@ -211,7 +211,6 @@ struct mtk_pcie_port {
|
||||
* @base: IO mapped register base
|
||||
* @cfg: IO mapped register map for PCIe config
|
||||
* @free_ck: free-run reference clock
|
||||
* @mem: non-prefetchable memory resource
|
||||
* @ports: pointer to PCIe port information
|
||||
* @soc: pointer to SoC-dependent operations
|
||||
*/
|
||||
@ -407,12 +406,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
(int)data->hwirq, msg->address_hi, msg->address_lo);
|
||||
}
|
||||
|
||||
static int mtk_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void mtk_msi_ack_irq(struct irq_data *data)
|
||||
{
|
||||
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
|
||||
@ -424,7 +417,6 @@ static void mtk_msi_ack_irq(struct irq_data *data)
|
||||
static struct irq_chip mtk_msi_bottom_irq_chip = {
|
||||
.name = "MTK MSI",
|
||||
.irq_compose_msi_msg = mtk_compose_msi_msg,
|
||||
.irq_set_affinity = mtk_msi_set_affinity,
|
||||
.irq_ack = mtk_msi_ack_irq,
|
||||
};
|
||||
|
||||
@ -486,8 +478,8 @@ static struct irq_chip mtk_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info mtk_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
|
||||
.chip = &mtk_msi_irq_chip,
|
||||
};
|
||||
|
||||
|
@@ -658,11 +658,6 @@ static void rcar_msi_irq_unmask(struct irq_data *d)
|
||||
spin_unlock_irqrestore(&msi->mask_lock, flags);
|
||||
}
|
||||
|
||||
static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
{
|
||||
struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
|
||||
@ -678,7 +673,6 @@ static struct irq_chip rcar_msi_bottom_chip = {
|
||||
.irq_ack = rcar_msi_irq_ack,
|
||||
.irq_mask = rcar_msi_irq_mask,
|
||||
.irq_unmask = rcar_msi_irq_unmask,
|
||||
.irq_set_affinity = rcar_msi_set_affinity,
|
||||
.irq_compose_msi_msg = rcar_compose_msi_msg,
|
||||
};
|
||||
|
||||
@ -725,8 +719,8 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info rcar_msi_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_MULTI_PCI_MSI),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
|
||||
.chip = &rcar_msi_top_chip,
|
||||
};
|
||||
|
||||
|
@@ -71,10 +71,24 @@
|
||||
|
||||
/* Phy Status/Control Register definitions */
|
||||
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11)
|
||||
#define QDMA_BRIDGE_BASE_OFF 0xcd8
|
||||
|
||||
/* Number of MSI IRQs */
|
||||
#define XILINX_NUM_MSI_IRQS 64
|
||||
|
||||
enum xilinx_pl_dma_version {
|
||||
XDMA,
|
||||
QDMA,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xilinx_pl_dma_variant - PL DMA PCIe variant information
|
||||
* @version: DMA version
|
||||
*/
|
||||
struct xilinx_pl_dma_variant {
|
||||
enum xilinx_pl_dma_version version;
|
||||
};
|
||||
|
||||
struct xilinx_msi {
|
||||
struct irq_domain *msi_domain;
|
||||
unsigned long *bitmap;
|
||||
@ -88,6 +102,7 @@ struct xilinx_msi {
|
||||
* struct pl_dma_pcie - PCIe port information
|
||||
* @dev: Device pointer
|
||||
* @reg_base: IO Mapped Register Base
|
||||
* @cfg_base: IO Mapped Configuration Base
|
||||
* @irq: Interrupt number
|
||||
* @cfg: Holds mappings of config space window
|
||||
* @phys_reg_base: Physical address of reg base
|
||||
@ -97,10 +112,12 @@ struct xilinx_msi {
|
||||
* @msi: MSI information
|
||||
* @intx_irq: INTx error interrupt number
|
||||
* @lock: Lock protecting shared register access
|
||||
* @variant: PL DMA PCIe version check pointer
|
||||
*/
|
||||
struct pl_dma_pcie {
|
||||
struct device *dev;
|
||||
void __iomem *reg_base;
|
||||
void __iomem *cfg_base;
|
||||
int irq;
|
||||
struct pci_config_window *cfg;
|
||||
phys_addr_t phys_reg_base;
|
||||
@ -110,16 +127,23 @@ struct pl_dma_pcie {
|
||||
struct xilinx_msi msi;
|
||||
int intx_irq;
|
||||
raw_spinlock_t lock;
|
||||
const struct xilinx_pl_dma_variant *variant;
|
||||
};
|
||||
|
||||
static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
|
||||
{
|
||||
if (port->variant->version == QDMA)
|
||||
return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
|
||||
|
||||
return readl(port->reg_base + reg);
|
||||
}
|
||||
|
||||
static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
|
||||
{
|
||||
writel(val, port->reg_base + reg);
|
||||
if (port->variant->version == QDMA)
|
||||
writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
|
||||
else
|
||||
writel(val, port->reg_base + reg);
|
||||
}
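/*
* For the QDMA variant the bridge registers presumably sit behind a
* window at QDMA_BRIDGE_BASE_OFF (0xcd8) within reg_base, so both
* accessors redirect there; the XDMA variant keeps using reg_base
* directly.
*/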
|
||||
|
||||
static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
|
||||
@ -173,6 +197,9 @@ static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
|
||||
if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
|
||||
return NULL;
|
||||
|
||||
if (port->variant->version == QDMA)
|
||||
return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
|
||||
|
||||
return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
|
||||
}
|
||||
|
||||
@ -355,8 +382,8 @@ static struct irq_chip xilinx_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info xilinx_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_MULTI_PCI_MSI),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
|
||||
.chip = &xilinx_msi_irq_chip,
|
||||
};
|
||||
|
||||
@ -370,16 +397,9 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
msg->data = data->hwirq;
|
||||
}
|
||||
|
||||
static int xilinx_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct irq_chip xilinx_irq_chip = {
|
||||
.name = "pl_dma:MSI",
|
||||
.irq_compose_msi_msg = xilinx_compose_msi_msg,
|
||||
.irq_set_affinity = xilinx_msi_set_affinity,
|
||||
};
|
||||
|
||||
static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
@ -731,6 +751,15 @@ static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
|
||||
|
||||
port->reg_base = port->cfg->win;
|
||||
|
||||
if (port->variant->version == QDMA) {
|
||||
port->cfg_base = port->cfg->win;
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
|
||||
port->reg_base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(port->reg_base))
|
||||
return PTR_ERR(port->reg_base);
|
||||
port->phys_reg_base = res->start;
|
||||
}
|
||||
|
||||
err = xilinx_request_msi_irq(port);
|
||||
if (err) {
|
||||
pci_ecam_free(port->cfg);
|
||||
@ -760,6 +789,8 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
|
||||
if (!bus)
|
||||
return -ENODEV;
|
||||
|
||||
port->variant = of_device_get_match_data(dev);
|
||||
|
||||
err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
|
||||
if (err) {
|
||||
dev_err(dev, "Parsing DT failed\n");
|
||||
@ -791,9 +822,22 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
|
||||
return err;
|
||||
}
|
||||
|
||||
static const struct xilinx_pl_dma_variant xdma_host = {
|
||||
.version = XDMA,
|
||||
};
|
||||
|
||||
static const struct xilinx_pl_dma_variant qdma_host = {
|
||||
.version = QDMA,
|
||||
};
|
||||
|
||||
static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
|
||||
{
|
||||
.compatible = "xlnx,xdma-host-3.00",
|
||||
.data = &xdma_host,
|
||||
},
|
||||
{
|
||||
.compatible = "xlnx,qdma-host-3.00",
|
||||
.data = &qdma_host,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
@@ -19,6 +19,7 @@
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci-ecam.h>
|
||||
#include <linux/phy/phy.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
|
||||
@ -80,8 +81,8 @@
|
||||
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
|
||||
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
|
||||
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
|
||||
#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
|
||||
#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
|
||||
#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
|
||||
#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
|
||||
|
||||
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
|
||||
MSGF_MISC_SR_RXMSG_OVER | \
|
||||
@ -96,8 +97,8 @@
|
||||
MSGF_MISC_SR_NON_FATAL_DEV | \
|
||||
MSGF_MISC_SR_FATAL_DEV | \
|
||||
MSGF_MISC_SR_LINK_DOWN | \
|
||||
MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
|
||||
MSGF_MSIC_SR_LINK_BWIDTH)
|
||||
MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
|
||||
MSGF_MISC_SR_LINK_BWIDTH)
|
||||
|
||||
/* Legacy interrupt status mask bits */
|
||||
#define MSGF_LEG_SR_INTA BIT(0)
|
||||
@ -157,6 +158,7 @@ struct nwl_pcie {
|
||||
void __iomem *breg_base;
|
||||
void __iomem *pcireg_base;
|
||||
void __iomem *ecam_base;
|
||||
struct phy *phy[4];
|
||||
phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
|
||||
phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
|
||||
phys_addr_t phys_ecam_base; /* Physical Configuration Base */
|
||||
@ -267,42 +269,42 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
|
||||
return IRQ_NONE;
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
|
||||
dev_err(dev, "Received Message FIFO Overflow\n");
|
||||
dev_err_ratelimited(dev, "Received Message FIFO Overflow\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
|
||||
dev_err(dev, "Slave error\n");
|
||||
dev_err_ratelimited(dev, "Slave error\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
|
||||
dev_err(dev, "Master error\n");
|
||||
dev_err_ratelimited(dev, "Master error\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
|
||||
dev_err(dev, "In Misc Ingress address translation error\n");
|
||||
dev_err_ratelimited(dev, "In Misc Ingress address translation error\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
|
||||
dev_err(dev, "In Misc Egress address translation error\n");
|
||||
dev_err_ratelimited(dev, "In Misc Egress address translation error\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
|
||||
dev_err(dev, "Fatal Error in AER Capability\n");
|
||||
dev_err_ratelimited(dev, "Fatal Error in AER Capability\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
|
||||
dev_err(dev, "Non-Fatal Error in AER Capability\n");
|
||||
dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_CORR_AER)
|
||||
dev_err(dev, "Correctable Error in AER Capability\n");
|
||||
dev_err_ratelimited(dev, "Correctable Error in AER Capability\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
|
||||
dev_err(dev, "Unsupported request Detected\n");
|
||||
dev_err_ratelimited(dev, "Unsupported request Detected\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
|
||||
dev_err(dev, "Non-Fatal Error Detected\n");
|
||||
dev_err_ratelimited(dev, "Non-Fatal Error Detected\n");
|
||||
|
||||
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
|
||||
dev_err(dev, "Fatal Error Detected\n");
|
||||
dev_err_ratelimited(dev, "Fatal Error Detected\n");
|
||||
|
||||
if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
|
||||
if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
|
||||
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
|
||||
|
||||
if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
|
||||
if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
|
||||
dev_info(dev, "Link Bandwidth Management Status bit set\n");
|
||||
|
||||
/* Clear misc interrupt status */
|
||||
@@ -371,7 +373,7 @@ static void nwl_mask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;

mask = 1 << (data->hwirq - 1);
mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
@@ -385,7 +387,7 @@ static void nwl_unmask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;

mask = 1 << (data->hwirq - 1);
mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
|
||||
@ -425,8 +427,8 @@ static struct irq_chip nwl_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info nwl_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_MULTI_PCI_MSI),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
|
||||
.chip = &nwl_msi_irq_chip,
|
||||
};
|
||||
#endif
|
||||
@ -441,16 +443,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
msg->data = data->hwirq;
|
||||
}
|
||||
|
||||
static int nwl_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct irq_chip nwl_irq_chip = {
|
||||
.name = "Xilinx MSI",
|
||||
.irq_compose_msi_msg = nwl_compose_msi_msg,
|
||||
.irq_set_affinity = nwl_msi_set_affinity,
|
||||
};
|
||||
|
||||
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
@ -521,6 +516,60 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i)
|
||||
{
|
||||
int err = phy_power_off(pcie->phy[i]);
|
||||
|
||||
if (err)
|
||||
dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i,
|
||||
err);
|
||||
}
|
||||
|
||||
static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i)
|
||||
{
|
||||
int err = phy_exit(pcie->phy[i]);
|
||||
|
||||
if (err)
|
||||
dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err);
|
||||
}
|
||||
|
||||
static int nwl_pcie_phy_enable(struct nwl_pcie *pcie)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
|
||||
ret = phy_init(pcie->phy[i]);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = phy_power_on(pcie->phy[i]);
|
||||
if (ret) {
|
||||
nwl_pcie_phy_exit(pcie, i);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
while (i--) {
|
||||
nwl_pcie_phy_power_off(pcie, i);
|
||||
nwl_pcie_phy_exit(pcie, i);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void nwl_pcie_phy_disable(struct nwl_pcie *pcie)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = ARRAY_SIZE(pcie->phy); i--;) {
|
||||
nwl_pcie_phy_power_off(pcie, i);
|
||||
nwl_pcie_phy_exit(pcie, i);
|
||||
}
|
||||
}
|
||||
|
||||
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
|
||||
{
|
||||
struct device *dev = pcie->dev;
|
||||
@ -732,6 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
|
||||
{
|
||||
struct device *dev = pcie->dev;
|
||||
struct resource *res;
|
||||
int i;
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
|
||||
pcie->breg_base = devm_ioremap_resource(dev, res);
|
||||
@ -759,6 +809,18 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
|
||||
irq_set_chained_handler_and_data(pcie->irq_intx,
|
||||
nwl_pcie_leg_handler, pcie);
|
||||
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
|
||||
pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
|
||||
if (PTR_ERR(pcie->phy[i]) == -ENODEV) {
|
||||
pcie->phy[i] = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (IS_ERR(pcie->phy[i]))
|
||||
return PTR_ERR(pcie->phy[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -779,6 +841,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
|
||||
return -ENODEV;
|
||||
|
||||
pcie = pci_host_bridge_priv(bridge);
|
||||
platform_set_drvdata(pdev, pcie);
|
||||
|
||||
pcie->dev = dev;
|
||||
|
||||
@ -798,16 +861,22 @@ static int nwl_pcie_probe(struct platform_device *pdev)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = nwl_pcie_phy_enable(pcie);
|
||||
if (err) {
|
||||
dev_err(dev, "could not enable PHYs\n");
|
||||
goto err_clk;
|
||||
}
|
||||
|
||||
err = nwl_pcie_bridge_init(pcie);
|
||||
if (err) {
|
||||
dev_err(dev, "HW Initialization failed\n");
|
||||
return err;
|
||||
goto err_phy;
|
||||
}
|
||||
|
||||
err = nwl_pcie_init_irq_domain(pcie);
|
||||
if (err) {
|
||||
dev_err(dev, "Failed creating IRQ Domain\n");
|
||||
return err;
|
||||
goto err_phy;
|
||||
}
|
||||
|
||||
bridge->sysdata = pcie;
|
||||
@ -817,11 +886,27 @@ static int nwl_pcie_probe(struct platform_device *pdev)
|
||||
err = nwl_pcie_enable_msi(pcie);
|
||||
if (err < 0) {
|
||||
dev_err(dev, "failed to enable MSI support: %d\n", err);
|
||||
return err;
|
||||
goto err_phy;
|
||||
}
|
||||
}
|
||||
|
||||
return pci_host_probe(bridge);
|
||||
err = pci_host_probe(bridge);
|
||||
if (!err)
|
||||
return 0;
|
||||
|
||||
err_phy:
|
||||
nwl_pcie_phy_disable(pcie);
|
||||
err_clk:
|
||||
clk_disable_unprepare(pcie->clk);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void nwl_pcie_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct nwl_pcie *pcie = platform_get_drvdata(pdev);
|
||||
|
||||
nwl_pcie_phy_disable(pcie);
|
||||
clk_disable_unprepare(pcie->clk);
|
||||
}
|
||||
|
||||
static struct platform_driver nwl_pcie_driver = {
|
||||
@ -831,5 +916,6 @@ static struct platform_driver nwl_pcie_driver = {
|
||||
.of_match_table = nwl_pcie_of_match,
|
||||
},
|
||||
.probe = nwl_pcie_probe,
|
||||
.remove_new = nwl_pcie_remove,
|
||||
};
|
||||
builtin_platform_driver(nwl_pcie_driver);
|
||||
|
@ -208,11 +208,6 @@ static struct irq_chip xilinx_msi_top_chip = {
|
||||
.irq_ack = xilinx_msi_top_irq_ack,
|
||||
};
|
||||
|
||||
static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
{
|
||||
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
|
||||
@ -225,7 +220,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
|
||||
static struct irq_chip xilinx_msi_bottom_chip = {
|
||||
.name = "Xilinx MSI",
|
||||
.irq_set_affinity = xilinx_msi_set_affinity,
|
||||
.irq_compose_msi_msg = xilinx_compose_msi_msg,
|
||||
};
|
||||
|
||||
@ -271,7 +265,8 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info xilinx_msi_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY,
|
||||
.chip = &xilinx_msi_top_chip,
|
||||
};
|
||||
|
||||
|
@ -76,17 +76,10 @@ static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
(int)data->hwirq, msg->address_hi, msg->address_lo);
|
||||
}
|
||||
|
||||
static int plda_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct irq_chip plda_msi_bottom_irq_chip = {
|
||||
.name = "PLDA MSI",
|
||||
.irq_ack = plda_msi_bottom_irq_ack,
|
||||
.irq_compose_msi_msg = plda_compose_msi_msg,
|
||||
.irq_set_affinity = plda_msi_set_affinity,
|
||||
};
|
||||
|
||||
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
|
||||
@ -146,8 +139,8 @@ static struct irq_chip plda_msi_irq_chip = {
|
||||
};
|
||||
|
||||
static struct msi_domain_info plda_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX),
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
|
||||
.chip = &plda_msi_irq_chip,
|
||||
};
|
||||
|
||||
|
@ -204,22 +204,11 @@ static void vmd_irq_disable(struct irq_data *data)
|
||||
raw_spin_unlock_irqrestore(&list_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* XXX: Stubbed until we develop acceptable way to not create conflicts with
|
||||
* other devices sharing the same vector.
|
||||
*/
|
||||
static int vmd_irq_set_affinity(struct irq_data *data,
|
||||
const struct cpumask *dest, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct irq_chip vmd_msi_controller = {
|
||||
.name = "VMD-MSI",
|
||||
.irq_enable = vmd_irq_enable,
|
||||
.irq_disable = vmd_irq_disable,
|
||||
.irq_compose_msi_msg = vmd_compose_msi_msg,
|
||||
.irq_set_affinity = vmd_irq_set_affinity,
|
||||
};
|
||||
|
||||
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
|
||||
@ -326,7 +315,7 @@ static struct msi_domain_ops vmd_msi_domain_ops = {
|
||||
|
||||
static struct msi_domain_info vmd_msi_domain_info = {
|
||||
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX,
|
||||
MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
|
||||
.ops = &vmd_msi_domain_ops,
|
||||
.chip = &vmd_msi_controller,
|
||||
};
|
||||
@ -1053,9 +1042,9 @@ static void vmd_remove(struct pci_dev *dev)
|
||||
|
||||
static void vmd_shutdown(struct pci_dev *dev)
|
||||
{
|
||||
struct vmd_dev *vmd = pci_get_drvdata(dev);
|
||||
struct vmd_dev *vmd = pci_get_drvdata(dev);
|
||||
|
||||
vmd_remove_irq_domain(vmd);
|
||||
vmd_remove_irq_domain(vmd);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
|
@@ -730,7 +730,7 @@ EXPORT_SYMBOL(pcim_iounmap);
* Mapping and region will get automatically released on driver detach. If
* desired, release manually only with pcim_iounmap_region().
*/
static void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
const char *name)
{
int ret;
@@ -763,6 +763,7 @@ static void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,

return IOMEM_ERR_PTR(ret);
}
EXPORT_SYMBOL(pcim_iomap_region);

/**
* pcim_iounmap_region - Unmap and release a PCI BAR
@@ -785,7 +786,7 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
}

/**
* pcim_iomap_regions - Request and iomap PCI BARs
* pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
* @name: Name associated with the requests
@@ -793,6 +794,9 @@ static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
* Returns: 0 on success, negative error code on failure.
*
* Request and iomap regions specified by @mask.
*
* This function is DEPRECATED. Do not use it in new code.
* Use pcim_iomap_region() instead.
*/
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
@@ -865,6 +869,7 @@ int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
{
return _pcim_request_region(pdev, bar, name, 0);
}
EXPORT_SYMBOL(pcim_request_region);

/**
* pcim_request_region_exclusive - Request a PCI BAR exclusively
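
As the kerneldoc above notes, new code is expected to request and map a single BAR with the newly exported pcim_iomap_region() rather than the deprecated mask-based pcim_iomap_regions(). A minimal probe sketch under that assumption (the "foo" driver name and BAR number are illustrative only):

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);	/* managed enable; no explicit cleanup needed */
	if (ret)
		return ret;

	/* request + ioremap BAR 0; released automatically on driver detach */
	regs = pcim_iomap_region(pdev, 0, "foo");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* ... program the device through regs ... */
	return 0;
}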
|
@ -838,6 +838,10 @@ void pci_epc_destroy(struct pci_epc *epc)
|
||||
{
|
||||
pci_ep_cfs_remove_epc_group(epc->group);
|
||||
device_unregister(&epc->dev);
|
||||
|
||||
#ifdef CONFIG_PCI_DOMAINS_GENERIC
|
||||
pci_bus_release_domain_nr(&epc->dev, epc->domain_nr);
|
||||
#endif
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_epc_destroy);
|
||||
|
||||
@ -900,6 +904,16 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
|
||||
epc->dev.release = pci_epc_release;
|
||||
epc->ops = ops;
|
||||
|
||||
#ifdef CONFIG_PCI_DOMAINS_GENERIC
|
||||
epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
|
||||
#else
|
||||
/*
|
||||
* TODO: If the architecture doesn't support generic PCI
|
||||
* domains, then a custom implementation has to be used.
|
||||
*/
|
||||
WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
|
||||
#endif
|
||||
|
||||
ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
|
||||
if (ret)
|
||||
goto put_dev;
|
||||
|
@@ -51,11 +51,6 @@ ibmphp:

shpchp:

* There is only a single implementation of struct hpc_ops. Can the struct be
removed and its functions invoked directly? This has already been done in
pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify
if there was a specific reason not to apply the same change to shpchp.

* The hardirq handler shpc_isr() queues events on a workqueue. It can be
simplified by converting it to threaded IRQ handling. Use pciehp as a
template.
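
The second TODO item above describes replacing shpc_isr()'s workqueue bounce with threaded interrupt handling, as pciehp already does. A rough sketch of that shape, purely illustrative (the shpc_pending_events()/shpc_handle_events() helpers are hypothetical, not part of shpchp today):

/* hard handler: check the source, then defer the real work to a thread */
static irqreturn_t shpc_isr(int irq, void *dev_id)
{
	struct controller *ctrl = dev_id;

	if (!shpc_pending_events(ctrl))	/* hypothetical "is it ours?" check */
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;		/* run shpc_ist() in process context */
}

static irqreturn_t shpc_ist(int irq, void *dev_id)
{
	struct controller *ctrl = dev_id;

	shpc_handle_events(ctrl);	/* hypothetical, replaces the workqueue */
	return IRQ_HANDLED;
}

/* registration would then use
 * request_threaded_irq(ctrl->pci_dev->irq, shpc_isr, shpc_ist,
 *			IRQF_SHARED, MY_NAME, ctrl);
 */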
@ -328,7 +328,7 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
|
||||
} else {
|
||||
/* Did not get a match on the target PCI device. Check
|
||||
* if the current IRQ table entry is a PCI-to-PCI
|
||||
* bridge device. If so, and it's secondary bus
|
||||
* bridge device. If so, and its secondary bus
|
||||
* matches the bus number for the target device, I need
|
||||
* to save the bridge's slot number. If I can not find
|
||||
* an entry for the target device, I will have to
|
||||
|
@@ -138,7 +138,7 @@ static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 o

if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1)
return -1;
if (vendID == 0xffffffff)
if (PCI_POSSIBLE_ERROR(vendID))
return -1;
return pci_bus_read_config_dword(bus, devfn, offset, value);
}
@@ -253,7 +253,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
if (!nobridge || (work == 0xffffffff))
if (!nobridge || PCI_POSSIBLE_ERROR(work))
return 0;

dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
|
||||
|
@ -112,7 +112,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
|
||||
|
||||
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
|
||||
{
|
||||
/* if the slot exits it always contains a function */
|
||||
/* if the slot exists it always contains a function */
|
||||
*value = 1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -72,7 +72,6 @@ struct slot {
|
||||
u8 latch_save;
|
||||
u8 pwr_save;
|
||||
struct controller *ctrl;
|
||||
const struct hpc_ops *hpc_ops;
|
||||
struct hotplug_slot hotplug_slot;
|
||||
struct list_head slot_list;
|
||||
struct delayed_work work; /* work for button event */
|
||||
@ -94,7 +93,6 @@ struct controller {
|
||||
int slot_num_inc; /* 1 or -1 */
|
||||
struct pci_dev *pci_dev;
|
||||
struct list_head slot_list;
|
||||
const struct hpc_ops *hpc_ops;
|
||||
wait_queue_head_t queue; /* sleep & wake process */
|
||||
u8 slot_device_offset;
|
||||
u32 pcix_misc2_reg; /* for amd pogo errata */
|
||||
@@ -300,24 +298,22 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp);
}

struct hpc_ops {
int (*power_on_slot)(struct slot *slot);
int (*slot_enable)(struct slot *slot);
int (*slot_disable)(struct slot *slot);
int (*set_bus_speed_mode)(struct slot *slot, enum pci_bus_speed speed);
int (*get_power_status)(struct slot *slot, u8 *status);
int (*get_attention_status)(struct slot *slot, u8 *status);
int (*set_attention_status)(struct slot *slot, u8 status);
int (*get_latch_status)(struct slot *slot, u8 *status);
int (*get_adapter_status)(struct slot *slot, u8 *status);
int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_prog_int)(struct slot *slot, u8 *prog_int);
int (*query_power_fault)(struct slot *slot);
void (*green_led_on)(struct slot *slot);
void (*green_led_off)(struct slot *slot);
void (*green_led_blink)(struct slot *slot);
void (*release_ctlr)(struct controller *ctrl);
int (*check_cmd_status)(struct controller *ctrl);
};
|
||||
int shpchp_power_on_slot(struct slot *slot);
|
||||
int shpchp_slot_enable(struct slot *slot);
|
||||
int shpchp_slot_disable(struct slot *slot);
|
||||
int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed speed);
|
||||
int shpchp_get_power_status(struct slot *slot, u8 *status);
|
||||
int shpchp_get_attention_status(struct slot *slot, u8 *status);
|
||||
int shpchp_set_attention_status(struct slot *slot, u8 status);
|
||||
int shpchp_get_latch_status(struct slot *slot, u8 *status);
|
||||
int shpchp_get_adapter_status(struct slot *slot, u8 *status);
|
||||
int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *speed);
|
||||
int shpchp_get_prog_int(struct slot *slot, u8 *prog_int);
|
||||
int shpchp_query_power_fault(struct slot *slot);
|
||||
void shpchp_green_led_on(struct slot *slot);
|
||||
void shpchp_green_led_off(struct slot *slot);
|
||||
void shpchp_green_led_blink(struct slot *slot);
|
||||
void shpchp_release_ctlr(struct controller *ctrl);
|
||||
int shpchp_check_cmd_status(struct controller *ctrl);
|
||||
|
||||
#endif /* _SHPCHP_H */
|
||||
|
@ -81,7 +81,6 @@ static int init_slots(struct controller *ctrl)
|
||||
slot->ctrl = ctrl;
|
||||
slot->bus = ctrl->pci_dev->subordinate->number;
|
||||
slot->device = ctrl->slot_device_offset + i;
|
||||
slot->hpc_ops = ctrl->hpc_ops;
|
||||
slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
|
||||
|
||||
slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
|
||||
@ -150,7 +149,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
|
||||
__func__, slot_name(slot));
|
||||
|
||||
slot->attention_save = status;
|
||||
slot->hpc_ops->set_attention_status(slot, status);
|
||||
shpchp_set_attention_status(slot, status);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -183,7 +182,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
|
||||
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
|
||||
__func__, slot_name(slot));
|
||||
|
||||
retval = slot->hpc_ops->get_power_status(slot, value);
|
||||
retval = shpchp_get_power_status(slot, value);
|
||||
if (retval < 0)
|
||||
*value = slot->pwr_save;
|
||||
|
||||
@ -198,7 +197,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
|
||||
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
|
||||
__func__, slot_name(slot));
|
||||
|
||||
retval = slot->hpc_ops->get_attention_status(slot, value);
|
||||
retval = shpchp_get_attention_status(slot, value);
|
||||
if (retval < 0)
|
||||
*value = slot->attention_save;
|
||||
|
||||
@ -213,7 +212,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
|
||||
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
|
||||
__func__, slot_name(slot));
|
||||
|
||||
retval = slot->hpc_ops->get_latch_status(slot, value);
|
||||
retval = shpchp_get_latch_status(slot, value);
|
||||
if (retval < 0)
|
||||
*value = slot->latch_save;
|
||||
|
||||
@ -228,7 +227,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
|
||||
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
|
||||
__func__, slot_name(slot));
|
||||
|
||||
retval = slot->hpc_ops->get_adapter_status(slot, value);
|
||||
retval = shpchp_get_adapter_status(slot, value);
|
||||
if (retval < 0)
|
||||
*value = slot->presence_save;
|
||||
|
||||
@ -293,7 +292,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
err_cleanup_slots:
|
||||
cleanup_slots(ctrl);
|
||||
err_out_release_ctlr:
|
||||
ctrl->hpc_ops->release_ctlr(ctrl);
|
||||
shpchp_release_ctlr(ctrl);
|
||||
err_out_free_ctrl:
|
||||
kfree(ctrl);
|
||||
err_out_none:
|
||||
@ -306,7 +305,7 @@ static void shpc_remove(struct pci_dev *dev)
|
||||
|
||||
dev->shpc_managed = 0;
|
||||
shpchp_remove_ctrl_files(ctrl);
|
||||
ctrl->hpc_ops->release_ctlr(ctrl);
|
||||
shpchp_release_ctlr(ctrl);
|
||||
kfree(ctrl);
|
||||
}
|
||||
|
||||
|
@ -51,7 +51,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
|
||||
ctrl_dbg(ctrl, "Attention button interrupt received\n");
|
||||
|
||||
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
|
||||
p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
|
||||
shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
|
||||
|
||||
/*
|
||||
* Button pressed - See if need to TAKE ACTION!!!
|
||||
@ -75,8 +75,8 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
|
||||
ctrl_dbg(ctrl, "Switch interrupt received\n");
|
||||
|
||||
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
|
||||
p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
|
||||
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
|
||||
shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
|
||||
shpchp_get_latch_status(p_slot, &getstatus);
|
||||
ctrl_dbg(ctrl, "Card present %x Power status %x\n",
|
||||
p_slot->presence_save, p_slot->pwr_save);
|
||||
|
||||
@ -116,7 +116,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
|
||||
/*
|
||||
* Save the presence state
|
||||
*/
|
||||
p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
|
||||
shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
|
||||
if (p_slot->presence_save) {
|
||||
/*
|
||||
* Card Present
|
||||
@ -148,7 +148,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
|
||||
|
||||
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
|
||||
|
||||
if (!(p_slot->hpc_ops->query_power_fault(p_slot))) {
|
||||
if (!(shpchp_query_power_fault(p_slot))) {
|
||||
/*
|
||||
* Power fault Cleared
|
||||
*/
|
||||
@ -181,7 +181,7 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
|
||||
int rc = 0;
|
||||
|
||||
ctrl_dbg(ctrl, "Change speed to %d\n", speed);
|
||||
rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
|
||||
rc = shpchp_set_bus_speed_mode(p_slot, speed);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
|
||||
__func__);
|
||||
@ -241,14 +241,14 @@ static int board_added(struct slot *p_slot)
|
||||
__func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
|
||||
|
||||
/* Power on slot without connecting to bus */
|
||||
rc = p_slot->hpc_ops->power_on_slot(p_slot);
|
||||
rc = shpchp_power_on_slot(p_slot);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "Failed to power on slot\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
|
||||
rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
|
||||
rc = shpchp_set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
|
||||
__func__);
|
||||
@ -256,14 +256,14 @@ static int board_added(struct slot *p_slot)
|
||||
}
|
||||
|
||||
/* turn on board, blink green LED, turn off Amber LED */
|
||||
rc = p_slot->hpc_ops->slot_enable(p_slot);
|
||||
rc = shpchp_slot_enable(p_slot);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
|
||||
rc = shpchp_get_adapter_speed(p_slot, &asp);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n");
|
||||
return WRONG_BUS_FREQUENCY;
|
||||
@ -285,7 +285,7 @@ static int board_added(struct slot *p_slot)
|
||||
return rc;
|
||||
|
||||
/* turn on board, blink green LED, turn off Amber LED */
|
||||
rc = p_slot->hpc_ops->slot_enable(p_slot);
|
||||
rc = shpchp_slot_enable(p_slot);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
|
||||
return rc;
|
||||
@ -313,13 +313,13 @@ static int board_added(struct slot *p_slot)
|
||||
p_slot->is_a_board = 0x01;
|
||||
p_slot->pwr_save = 1;
|
||||
|
||||
p_slot->hpc_ops->green_led_on(p_slot);
|
||||
shpchp_green_led_on(p_slot);
|
||||
|
||||
return 0;
|
||||
|
||||
err_exit:
|
||||
/* turn off slot, turn on Amber LED, turn off Green LED */
|
||||
rc = p_slot->hpc_ops->slot_disable(p_slot);
|
||||
rc = shpchp_slot_disable(p_slot);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
|
||||
__func__);
|
||||
@ -352,14 +352,14 @@ static int remove_board(struct slot *p_slot)
|
||||
p_slot->status = 0x01;
|
||||
|
||||
/* turn off slot, turn on Amber LED, turn off Green LED */
|
||||
rc = p_slot->hpc_ops->slot_disable(p_slot);
|
||||
rc = shpchp_slot_disable(p_slot);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
|
||||
__func__);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
|
||||
rc = shpchp_set_attention_status(p_slot, 0);
|
||||
if (rc) {
|
||||
ctrl_err(ctrl, "Issue of Set Attention command failed\n");
|
||||
return rc;
|
||||
@ -401,7 +401,7 @@ static void shpchp_pushbutton_thread(struct work_struct *work)
|
||||
case POWERON_STATE:
|
||||
mutex_unlock(&p_slot->lock);
|
||||
if (shpchp_enable_slot(p_slot))
|
||||
p_slot->hpc_ops->green_led_off(p_slot);
|
||||
shpchp_green_led_off(p_slot);
|
||||
mutex_lock(&p_slot->lock);
|
||||
p_slot->state = STATIC_STATE;
|
||||
break;
|
||||
@ -446,10 +446,10 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
|
||||
|
||||
static void update_slot_info(struct slot *slot)
|
||||
{
|
||||
slot->hpc_ops->get_power_status(slot, &slot->pwr_save);
|
||||
slot->hpc_ops->get_attention_status(slot, &slot->attention_save);
|
||||
slot->hpc_ops->get_latch_status(slot, &slot->latch_save);
|
||||
slot->hpc_ops->get_adapter_status(slot, &slot->presence_save);
|
||||
shpchp_get_power_status(slot, &slot->pwr_save);
|
||||
shpchp_get_attention_status(slot, &slot->attention_save);
|
||||
shpchp_get_latch_status(slot, &slot->latch_save);
|
||||
shpchp_get_adapter_status(slot, &slot->presence_save);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -462,7 +462,7 @@ static void handle_button_press_event(struct slot *p_slot)
|
||||
|
||||
switch (p_slot->state) {
|
||||
case STATIC_STATE:
|
||||
p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
|
||||
shpchp_get_power_status(p_slot, &getstatus);
|
||||
if (getstatus) {
|
||||
p_slot->state = BLINKINGOFF_STATE;
|
||||
ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
|
||||
@ -473,8 +473,8 @@ static void handle_button_press_event(struct slot *p_slot)
|
||||
slot_name(p_slot));
|
||||
}
|
||||
/* blink green LED and turn off amber */
|
||||
p_slot->hpc_ops->green_led_blink(p_slot);
|
||||
p_slot->hpc_ops->set_attention_status(p_slot, 0);
|
||||
shpchp_green_led_blink(p_slot);
|
||||
shpchp_set_attention_status(p_slot, 0);
|
||||
|
||||
queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
|
||||
break;
|
||||
@ -489,10 +489,10 @@ static void handle_button_press_event(struct slot *p_slot)
|
||||
slot_name(p_slot));
|
||||
cancel_delayed_work(&p_slot->work);
|
||||
if (p_slot->state == BLINKINGOFF_STATE)
|
||||
p_slot->hpc_ops->green_led_on(p_slot);
|
||||
shpchp_green_led_on(p_slot);
|
||||
else
|
||||
p_slot->hpc_ops->green_led_off(p_slot);
|
||||
p_slot->hpc_ops->set_attention_status(p_slot, 0);
|
||||
shpchp_green_led_off(p_slot);
|
||||
shpchp_set_attention_status(p_slot, 0);
|
||||
ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
|
||||
slot_name(p_slot));
|
||||
p_slot->state = STATIC_STATE;
|
||||
@ -526,8 +526,8 @@ static void interrupt_event_handler(struct work_struct *work)
|
||||
break;
|
||||
case INT_POWER_FAULT:
|
||||
ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__);
|
||||
p_slot->hpc_ops->set_attention_status(p_slot, 1);
|
||||
p_slot->hpc_ops->green_led_off(p_slot);
|
||||
shpchp_set_attention_status(p_slot, 1);
|
||||
shpchp_green_led_off(p_slot);
|
||||
break;
|
||||
default:
|
||||
update_slot_info(p_slot);
|
||||
@ -547,17 +547,17 @@ static int shpchp_enable_slot (struct slot *p_slot)
|
||||
|
||||
/* Check to see if (latch closed, card present, power off) */
|
||||
mutex_lock(&p_slot->ctrl->crit_sect);
|
||||
rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
|
||||
rc = shpchp_get_adapter_status(p_slot, &getstatus);
|
||||
if (rc || !getstatus) {
|
||||
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
|
||||
goto out;
|
||||
}
|
||||
rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
|
||||
rc = shpchp_get_latch_status(p_slot, &getstatus);
|
||||
if (rc || getstatus) {
|
||||
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
|
||||
goto out;
|
||||
}
|
||||
rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
|
||||
rc = shpchp_get_power_status(p_slot, &getstatus);
|
||||
if (rc || getstatus) {
|
||||
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
|
||||
slot_name(p_slot));
|
||||
@ -567,10 +567,10 @@ static int shpchp_enable_slot (struct slot *p_slot)
|
||||
p_slot->is_a_board = 1;
|
||||
|
||||
/* We have to save the presence info for these slots */
|
||||
p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
|
||||
p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save));
|
||||
shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
|
||||
shpchp_get_power_status(p_slot, &p_slot->pwr_save);
|
||||
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
|
||||
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
|
||||
shpchp_get_latch_status(p_slot, &getstatus);
|
||||
|
||||
if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
|
||||
p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
|
||||
@ -584,9 +584,8 @@ static int shpchp_enable_slot (struct slot *p_slot)
|
||||
retval = board_added(p_slot);
|
||||
|
||||
if (retval) {
|
||||
p_slot->hpc_ops->get_adapter_status(p_slot,
|
||||
&(p_slot->presence_save));
|
||||
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
|
||||
shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
|
||||
shpchp_get_latch_status(p_slot, &getstatus);
|
||||
}
|
||||
|
||||
update_slot_info(p_slot);
|
||||
@ -608,17 +607,17 @@ static int shpchp_disable_slot (struct slot *p_slot)
|
||||
/* Check to see if (latch closed, card present, power on) */
|
||||
mutex_lock(&p_slot->ctrl->crit_sect);
|
||||
|
||||
rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
|
||||
rc = shpchp_get_adapter_status(p_slot, &getstatus);
|
||||
if (rc || !getstatus) {
|
||||
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
|
||||
goto out;
|
||||
}
|
||||
rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
|
||||
rc = shpchp_get_latch_status(p_slot, &getstatus);
|
||||
if (rc || getstatus) {
|
||||
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
|
||||
goto out;
|
||||
}
|
||||
rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
|
||||
rc = shpchp_get_power_status(p_slot, &getstatus);
|
||||
if (rc || !getstatus) {
|
||||
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
|
||||
slot_name(p_slot));
|
||||
|
@ -167,7 +167,6 @@
|
||||
|
||||
static irqreturn_t shpc_isr(int irq, void *dev_id);
|
||||
static void start_int_poll_timer(struct controller *ctrl, int sec);
|
||||
static int hpc_check_cmd_status(struct controller *ctrl);
|
||||
|
||||
static inline u8 shpc_readb(struct controller *ctrl, int reg)
|
||||
{
|
||||
@ -317,7 +316,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
|
||||
if (retval)
|
||||
goto out;
|
||||
|
||||
cmd_status = hpc_check_cmd_status(slot->ctrl);
|
||||
cmd_status = shpchp_check_cmd_status(slot->ctrl);
|
||||
if (cmd_status) {
|
||||
ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n",
|
||||
cmd, cmd_status);
|
||||
@ -328,7 +327,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int hpc_check_cmd_status(struct controller *ctrl)
|
||||
int shpchp_check_cmd_status(struct controller *ctrl)
|
||||
{
|
||||
int retval = 0;
|
||||
u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
|
||||
@ -357,7 +356,7 @@ static int hpc_check_cmd_status(struct controller *ctrl)
|
||||
}
|
||||
|
||||
|
||||
static int hpc_get_attention_status(struct slot *slot, u8 *status)
|
||||
int shpchp_get_attention_status(struct slot *slot, u8 *status)
|
||||
{
|
||||
struct controller *ctrl = slot->ctrl;
|
||||
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
|
||||
@ -381,7 +380,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpc_get_power_status(struct slot *slot, u8 *status)
|
||||
int shpchp_get_power_status(struct slot *slot, u8 *status)
|
||||
{
|
||||
struct controller *ctrl = slot->ctrl;
|
||||
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
|
||||
@ -406,7 +405,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
|
||||
}
|
||||
|
||||
|
||||
static int hpc_get_latch_status(struct slot *slot, u8 *status)
|
||||
int shpchp_get_latch_status(struct slot *slot, u8 *status)
|
||||
{
|
||||
struct controller *ctrl = slot->ctrl;
|
||||
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
|
||||
@ -416,7 +415,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpc_get_adapter_status(struct slot *slot, u8 *status)
|
||||
int shpchp_get_adapter_status(struct slot *slot, u8 *status)
|
||||
{
|
||||
struct controller *ctrl = slot->ctrl;
|
||||
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
|
||||
@ -427,7 +426,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
|
||||
int shpchp_get_prog_int(struct slot *slot, u8 *prog_int)
|
||||
{
|
||||
struct controller *ctrl = slot->ctrl;
|
||||
|
||||
@ -436,7 +435,7 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
|
||||
int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
|
||||
{
|
||||
int retval = 0;
|
||||
struct controller *ctrl = slot->ctrl;
|
||||
@ -444,7 +443,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
|
||||
u8 m66_cap = !!(slot_reg & MHZ66_CAP);
|
||||
u8 pi, pcix_cap;
|
||||
|
||||
retval = hpc_get_prog_int(slot, &pi);
|
||||
retval = shpchp_get_prog_int(slot, &pi);
|
||||
if (retval)
|
||||
return retval;
|
||||
|
||||
@ -489,7 +488,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int hpc_query_power_fault(struct slot *slot)
|
||||
int shpchp_query_power_fault(struct slot *slot)
|
||||
{
|
||||
struct controller *ctrl = slot->ctrl;
|
||||
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
|
||||
@ -498,7 +497,7 @@ static int hpc_query_power_fault(struct slot *slot)
|
||||
return !(slot_reg & POWER_FAULT);
|
||||
}
|
||||
|
||||
static int hpc_set_attention_status(struct slot *slot, u8 value)
|
||||
int shpchp_set_attention_status(struct slot *slot, u8 value)
|
||||
{
|
||||
u8 slot_cmd = 0;
|
||||
|
||||
@ -520,22 +519,22 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
|
||||
}
|
||||
|
||||
|
||||
static void hpc_set_green_led_on(struct slot *slot)
|
||||
void shpchp_green_led_on(struct slot *slot)
|
||||
{
|
||||
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON);
|
||||
}
|
||||
|
||||
static void hpc_set_green_led_off(struct slot *slot)
|
||||
void shpchp_green_led_off(struct slot *slot)
|
||||
{
|
||||
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF);
|
||||
}
|
||||
|
||||
static void hpc_set_green_led_blink(struct slot *slot)
|
||||
void shpchp_green_led_blink(struct slot *slot)
|
||||
{
|
||||
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK);
|
||||
}
|
||||
|
||||
static void hpc_release_ctlr(struct controller *ctrl)
|
||||
void shpchp_release_ctlr(struct controller *ctrl)
|
||||
{
|
||||
int i;
|
||||
u32 slot_reg, serr_int;
|
||||
@ -575,7 +574,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
|
||||
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
|
||||
}
|
||||
|
||||
static int hpc_power_on_slot(struct slot *slot)
|
||||
int shpchp_power_on_slot(struct slot *slot)
|
||||
{
|
||||
int retval;
|
||||
|
||||
@ -586,7 +585,7 @@ static int hpc_power_on_slot(struct slot *slot)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int hpc_slot_enable(struct slot *slot)
|
||||
int shpchp_slot_enable(struct slot *slot)
|
||||
{
|
||||
int retval;
|
||||
|
||||
@ -599,7 +598,7 @@ static int hpc_slot_enable(struct slot *slot)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int hpc_slot_disable(struct slot *slot)
|
||||
int shpchp_slot_disable(struct slot *slot)
|
||||
{
|
||||
int retval;
|
||||
|
||||
@ -681,7 +680,7 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
|
||||
}
|
||||
|
||||
|
||||
static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
|
||||
int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
|
||||
{
|
||||
int retval;
|
||||
struct controller *ctrl = slot->ctrl;
|
||||
@ -871,28 +870,6 @@ static int shpc_get_max_bus_speed(struct controller *ctrl)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static const struct hpc_ops shpchp_hpc_ops = {
|
||||
.power_on_slot = hpc_power_on_slot,
|
||||
.slot_enable = hpc_slot_enable,
|
||||
.slot_disable = hpc_slot_disable,
|
||||
.set_bus_speed_mode = hpc_set_bus_speed_mode,
|
||||
.set_attention_status = hpc_set_attention_status,
|
||||
.get_power_status = hpc_get_power_status,
|
||||
.get_attention_status = hpc_get_attention_status,
|
||||
.get_latch_status = hpc_get_latch_status,
|
||||
.get_adapter_status = hpc_get_adapter_status,
|
||||
|
||||
.get_adapter_speed = hpc_get_adapter_speed,
|
||||
.get_prog_int = hpc_get_prog_int,
|
||||
|
||||
.query_power_fault = hpc_query_power_fault,
|
||||
.green_led_on = hpc_set_green_led_on,
|
||||
.green_led_off = hpc_set_green_led_off,
|
||||
.green_led_blink = hpc_set_green_led_blink,
|
||||
|
||||
.release_ctlr = hpc_release_ctlr,
|
||||
};
|
||||
|
||||
int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
|
||||
{
|
||||
int rc = -1, num_slots = 0;
|
||||
@ -978,8 +955,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
|
||||
/* Setup wait queue */
|
||||
init_waitqueue_head(&ctrl->queue);
|
||||
|
||||
ctrl->hpc_ops = &shpchp_hpc_ops;
|
||||
|
||||
/* Return PCI Controller Info */
|
||||
slot_config = shpc_readl(ctrl, SLOT_CONFIG);
|
||||
ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8;
|
||||
|
@@ -156,7 +156,7 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc);
* the different IOMAP ranges.
*
* But if the architecture does not use the generic iomap code, and if
* it has _not_ defined it's own private pci_iounmap function, we define
* it has _not_ defined its own private pci_iounmap function, we define
* it here.
*
* NOTE! This default implementation assumes that if the architecture
|
||||
|
drivers/pci/npem.c (new file, 595 lines)
@@ -0,0 +1,595 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe Enclosure management driver created for LED interfaces based on
* indications. It says *what indications* blink but does not specify *how*
* they blink - it is hardware defined.
*
* The driver name refers to Native PCIe Enclosure Management. It is
* first indication oriented standard with specification.
*
* Native PCIe Enclosure Management (NPEM)
* PCIe Base Specification r6.1 sec 6.28, 7.9.19
*
* _DSM Definitions for PCIe SSD Status LED
* PCI Firmware Specification, r3.3 sec 4.7
*
* Two backends are supported to manipulate indications: Direct NPEM register
* access (npem_ops) and indirect access through the ACPI _DSM (dsm_ops).
* _DSM is used if supported, else NPEM.
*
* Copyright (c) 2021-2022 Dell Inc.
* Copyright (c) 2023-2024 Intel Corporation
* Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
*/

#include <linux/acpi.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci_regs.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/uleds.h>
|
||||
|
||||
#include "pci.h"
|
||||
|
||||
struct indication {
|
||||
u32 bit;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
static const struct indication npem_indications[] = {
|
||||
{PCI_NPEM_IND_OK, "enclosure:ok"},
|
||||
{PCI_NPEM_IND_LOCATE, "enclosure:locate"},
|
||||
{PCI_NPEM_IND_FAIL, "enclosure:fail"},
|
||||
{PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
|
||||
{PCI_NPEM_IND_PFA, "enclosure:pfa"},
|
||||
{PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
|
||||
{PCI_NPEM_IND_ICA, "enclosure:ica"},
|
||||
{PCI_NPEM_IND_IFA, "enclosure:ifa"},
|
||||
{PCI_NPEM_IND_IDT, "enclosure:idt"},
|
||||
{PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
|
||||
{PCI_NPEM_IND_SPEC_0, "enclosure:specific_0"},
|
||||
{PCI_NPEM_IND_SPEC_1, "enclosure:specific_1"},
|
||||
{PCI_NPEM_IND_SPEC_2, "enclosure:specific_2"},
|
||||
{PCI_NPEM_IND_SPEC_3, "enclosure:specific_3"},
|
||||
{PCI_NPEM_IND_SPEC_4, "enclosure:specific_4"},
|
||||
{PCI_NPEM_IND_SPEC_5, "enclosure:specific_5"},
|
||||
{PCI_NPEM_IND_SPEC_6, "enclosure:specific_6"},
|
||||
{PCI_NPEM_IND_SPEC_7, "enclosure:specific_7"},
|
||||
{0, NULL}
|
||||
};
|
||||
|
||||
/* _DSM PCIe SSD LED States correspond to NPEM register values */
|
||||
static const struct indication dsm_indications[] = {
|
||||
{PCI_NPEM_IND_OK, "enclosure:ok"},
|
||||
{PCI_NPEM_IND_LOCATE, "enclosure:locate"},
|
||||
{PCI_NPEM_IND_FAIL, "enclosure:fail"},
|
||||
{PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
|
||||
{PCI_NPEM_IND_PFA, "enclosure:pfa"},
|
||||
{PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
|
||||
{PCI_NPEM_IND_ICA, "enclosure:ica"},
|
||||
{PCI_NPEM_IND_IFA, "enclosure:ifa"},
|
||||
{PCI_NPEM_IND_IDT, "enclosure:idt"},
|
||||
{PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
|
||||
{0, NULL}
|
||||
};
|
||||
|
||||
#define for_each_indication(ind, inds) \
|
||||
for (ind = inds; ind->bit; ind++)
|
||||
|
||||
/*
|
||||
* The driver has internal list of supported indications. Ideally, the driver
|
||||
* should not touch bits that are not defined and for which LED devices are
|
||||
* not exposed but in reality, it needs to turn them off.
|
||||
*
|
||||
* Otherwise, there will be no possibility to turn off indications turned on by
|
||||
* other utilities or turned on by default and it leads to bad user experience.
|
||||
*
|
||||
* Additionally, it excludes NPEM commands like RESET or ENABLE.
|
||||
*/
|
||||
static u32 reg_to_indications(u32 caps, const struct indication *inds)
|
||||
{
|
||||
const struct indication *ind;
|
||||
u32 supported_indications = 0;
|
||||
|
||||
for_each_indication(ind, inds)
|
||||
supported_indications |= ind->bit;
|
||||
|
||||
return caps & supported_indications;
|
||||
}
|
||||
|
||||
/**
|
||||
* struct npem_led - LED details
|
||||
* @indication: indication details
|
||||
* @npem: NPEM device
|
||||
* @name: LED name
|
||||
* @led: LED device
|
||||
*/
|
||||
struct npem_led {
|
||||
const struct indication *indication;
|
||||
struct npem *npem;
|
||||
char name[LED_MAX_NAME_SIZE];
|
||||
struct led_classdev led;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct npem_ops - backend specific callbacks
|
||||
* @get_active_indications: get active indications
|
||||
* npem: NPEM device
|
||||
* inds: response buffer
|
||||
* @set_active_indications: set new indications
|
||||
* npem: npem device
|
||||
* inds: bit mask to set
|
||||
* @inds: supported indications array, set of indications is backend specific
|
||||
* @name: backend name
|
||||
*/
|
||||
struct npem_ops {
|
||||
int (*get_active_indications)(struct npem *npem, u32 *inds);
|
||||
int (*set_active_indications)(struct npem *npem, u32 inds);
|
||||
const struct indication *inds;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct npem - NPEM device properties
|
||||
* @dev: PCI device this driver is attached to
|
||||
* @ops: backend specific callbacks
|
||||
* @lock: serializes concurrent access to NPEM device by multiple LED devices
|
||||
* @pos: cached offset of NPEM Capability Register in Configuration Space;
|
||||
* only used if NPEM registers are accessed directly and not through _DSM
|
||||
* @supported_indications: cached bit mask of supported indications;
|
||||
* non-indication and reserved bits in the NPEM Capability Register are
|
||||
* cleared in this bit mask
|
||||
* @active_indications: cached bit mask of active indications;
|
||||
* non-indication and reserved bits in the NPEM Control Register are
|
||||
* cleared in this bit mask
|
||||
* @active_inds_initialized: whether @active_indications has been initialized;
|
||||
* On Dell platforms, it is required that IPMI drivers are loaded before
|
||||
* the GET_STATE_DSM method is invoked: They use an IPMI OpRegion to
|
||||
* get/set the active LEDs. By initializing @active_indications lazily
|
||||
* (on first access to an LED), IPMI drivers are given a chance to load.
|
||||
* If they are not loaded in time, users will see various errors on LED
|
||||
* access in dmesg. Once they are loaded, the errors go away and LED
|
||||
* access becomes possible.
|
||||
* @led_cnt: size of @leds array
|
||||
* @leds: array containing LED class devices of all supported LEDs
|
||||
*/
|
||||
struct npem {
|
||||
struct pci_dev *dev;
|
||||
const struct npem_ops *ops;
|
||||
struct mutex lock;
|
||||
u16 pos;
|
||||
u32 supported_indications;
|
||||
u32 active_indications;
|
||||
unsigned int active_inds_initialized:1;
|
||||
int led_cnt;
|
||||
struct npem_led leds[];
|
||||
};
|
||||
|
||||
static int npem_read_reg(struct npem *npem, u16 reg, u32 *val)
|
||||
{
|
||||
int ret = pci_read_config_dword(npem->dev, npem->pos + reg, val);
|
||||
|
||||
return pcibios_err_to_errno(ret);
|
||||
}
|
||||
|
||||
static int npem_write_ctrl(struct npem *npem, u32 reg)
|
||||
{
|
||||
int pos = npem->pos + PCI_NPEM_CTRL;
|
||||
int ret = pci_write_config_dword(npem->dev, pos, reg);
|
||||
|
||||
return pcibios_err_to_errno(ret);
|
||||
}
|
||||
|
||||
static int npem_get_active_indications(struct npem *npem, u32 *inds)
|
||||
{
|
||||
u32 ctrl;
|
||||
int ret;
|
||||
|
||||
ret = npem_read_reg(npem, PCI_NPEM_CTRL, &ctrl);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* If PCI_NPEM_CTRL_ENABLE is not set then no indication should blink */
|
||||
if (!(ctrl & PCI_NPEM_CTRL_ENABLE)) {
|
||||
*inds = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*inds = ctrl & npem->supported_indications;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int npem_set_active_indications(struct npem *npem, u32 inds)
|
||||
{
|
||||
int ctrl, ret, ret_val;
|
||||
u32 cc_status;
|
||||
|
||||
lockdep_assert_held(&npem->lock);
|
||||
|
||||
/* This bit is always required */
|
||||
ctrl = inds | PCI_NPEM_CTRL_ENABLE;
|
||||
|
||||
ret = npem_write_ctrl(npem, ctrl);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* For the case where a NPEM command has not completed immediately,
|
||||
* it is recommended that software not continuously "spin" on polling
|
||||
* the status register, but rather poll under interrupt at a reduced
|
||||
* rate; for example at 10 ms intervals.
|
||||
*
|
||||
* PCIe r6.1 sec 6.28 "Implementation Note: Software Polling of NPEM
|
||||
* Command Completed"
|
||||
*/
|
||||
ret = read_poll_timeout(npem_read_reg, ret_val,
|
||||
ret_val || (cc_status & PCI_NPEM_STATUS_CC),
|
||||
10 * USEC_PER_MSEC, USEC_PER_SEC, false, npem,
|
||||
PCI_NPEM_STATUS, &cc_status);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/*
|
||||
* All writes to control register, including writes that do not change
|
||||
* the register value, are NPEM commands and should eventually result
|
||||
* in a command completion indication in the NPEM Status Register.
|
||||
*
|
||||
* PCIe Base Specification r6.1 sec 7.9.19.3
|
||||
*
|
||||
* Register may not be updated, or other conflicting bits may be
|
||||
* cleared. Spec is not strict here. Read NPEM Control register after
|
||||
* write to keep cache in-sync.
|
||||
*/
|
||||
	return npem_get_active_indications(npem, &npem->active_indications);
}

static const struct npem_ops npem_ops = {
	.get_active_indications = npem_get_active_indications,
	.set_active_indications = npem_set_active_indications,
	.name = "Native PCIe Enclosure Management",
	.inds = npem_indications,
};

#define DSM_GUID GUID_INIT(0x5d524d9d, 0xfff9, 0x4d4b, 0x8c, 0xb7, 0x74, 0x7e,\
			   0xd5, 0x1e, 0x19, 0x4d)
#define GET_SUPPORTED_STATES_DSM	1
#define GET_STATE_DSM			2
#define SET_STATE_DSM			3

static const guid_t dsm_guid = DSM_GUID;

static bool npem_has_dsm(struct pci_dev *pdev)
{
	acpi_handle handle;

	handle = ACPI_HANDLE(&pdev->dev);
	if (!handle)
		return false;

	return acpi_check_dsm(handle, &dsm_guid, 0x1,
			      BIT(GET_SUPPORTED_STATES_DSM) |
			      BIT(GET_STATE_DSM) | BIT(SET_STATE_DSM));
}

struct dsm_output {
	u16 status;
	u8 function_specific_err;
	u8 vendor_specific_err;
	u32 state;
};

/**
 * dsm_evaluate() - send DSM PCIe SSD Status LED command
 * @pdev: PCI device
 * @dsm_func: DSM LED Function
 * @output: buffer to copy DSM Response
 * @value_to_set: value for SET_STATE_DSM function
 *
 * To not bother caller with ACPI context, the returned _DSM Output Buffer is
 * copied.
 */
static int dsm_evaluate(struct pci_dev *pdev, u64 dsm_func,
			struct dsm_output *output, u32 value_to_set)
{
	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
	union acpi_object *out_obj, arg3[2];
	union acpi_object *arg3_p = NULL;

	if (dsm_func == SET_STATE_DSM) {
		arg3[0].type = ACPI_TYPE_PACKAGE;
		arg3[0].package.count = 1;
		arg3[0].package.elements = &arg3[1];

		arg3[1].type = ACPI_TYPE_BUFFER;
		arg3[1].buffer.length = 4;
		arg3[1].buffer.pointer = (u8 *)&value_to_set;

		arg3_p = arg3;
	}

	out_obj = acpi_evaluate_dsm_typed(handle, &dsm_guid, 0x1, dsm_func,
					  arg3_p, ACPI_TYPE_BUFFER);
	if (!out_obj)
		return -EIO;

	if (out_obj->buffer.length < sizeof(struct dsm_output)) {
		ACPI_FREE(out_obj);
		return -EIO;
	}

	memcpy(output, out_obj->buffer.pointer, sizeof(struct dsm_output));

	ACPI_FREE(out_obj);
	return 0;
}

static int dsm_get(struct pci_dev *pdev, u64 dsm_func, u32 *buf)
{
	struct dsm_output output;
	int ret = dsm_evaluate(pdev, dsm_func, &output, 0);

	if (ret)
		return ret;

	if (output.status != 0)
		return -EIO;

	*buf = output.state;
	return 0;
}

static int dsm_get_active_indications(struct npem *npem, u32 *buf)
{
	int ret = dsm_get(npem->dev, GET_STATE_DSM, buf);

	/* Filter out not supported indications in response */
	*buf &= npem->supported_indications;
	return ret;
}

static int dsm_set_active_indications(struct npem *npem, u32 value)
{
	struct dsm_output output;
	int ret = dsm_evaluate(npem->dev, SET_STATE_DSM, &output, value);

	if (ret)
		return ret;

	switch (output.status) {
	case 4:
		/*
		 * Not all bits are set. If this bit is set, the platform
		 * disregarded some or all of the request state changes. OSPM
		 * should check the resulting PCIe SSD Status LED States to see
		 * what, if anything, has changed.
		 *
		 * PCI Firmware Specification, r3.3 Table 4-19.
		 */
		if (output.function_specific_err != 1)
			return -EIO;
		fallthrough;
	case 0:
		break;
	default:
		return -EIO;
	}

	npem->active_indications = output.state;

	return 0;
}

static const struct npem_ops dsm_ops = {
	.get_active_indications = dsm_get_active_indications,
	.set_active_indications = dsm_set_active_indications,
	.name = "_DSM PCIe SSD Status LED Management",
	.inds = dsm_indications,
};

static int npem_initialize_active_indications(struct npem *npem)
{
	int ret;

	lockdep_assert_held(&npem->lock);

	if (npem->active_inds_initialized)
		return 0;

	ret = npem->ops->get_active_indications(npem,
						&npem->active_indications);
	if (ret)
		return ret;

	npem->active_inds_initialized = true;
	return 0;
}

/*
 * The status of each indicator is cached on first brightness_ get/set time
 * and updated at write time. brightness_get() is only responsible for
 * reflecting the last written/cached value.
 */
static enum led_brightness brightness_get(struct led_classdev *led)
{
	struct npem_led *nled = container_of(led, struct npem_led, led);
	struct npem *npem = nled->npem;
	int ret, val = 0;

	ret = mutex_lock_interruptible(&npem->lock);
	if (ret)
		return ret;

	ret = npem_initialize_active_indications(npem);
	if (ret)
		goto out;

	if (npem->active_indications & nled->indication->bit)
		val = 1;

out:
	mutex_unlock(&npem->lock);
	return val;
}

static int brightness_set(struct led_classdev *led,
			  enum led_brightness brightness)
{
	struct npem_led *nled = container_of(led, struct npem_led, led);
	struct npem *npem = nled->npem;
	u32 indications;
	int ret;

	ret = mutex_lock_interruptible(&npem->lock);
	if (ret)
		return ret;

	ret = npem_initialize_active_indications(npem);
	if (ret)
		goto out;

	if (brightness == 0)
		indications = npem->active_indications & ~(nled->indication->bit);
	else
		indications = npem->active_indications | nled->indication->bit;

	ret = npem->ops->set_active_indications(npem, indications);

out:
	mutex_unlock(&npem->lock);
	return ret;
}

static void npem_free(struct npem *npem)
{
	struct npem_led *nled;
	int cnt;

	if (!npem)
		return;

	for (cnt = 0; cnt < npem->led_cnt; cnt++) {
		nled = &npem->leds[cnt];

		if (nled->name[0])
			led_classdev_unregister(&nled->led);
	}

	mutex_destroy(&npem->lock);
	kfree(npem);
}

static int pci_npem_set_led_classdev(struct npem *npem, struct npem_led *nled)
{
	struct led_classdev *led = &nled->led;
	struct led_init_data init_data = {};
	char *name = nled->name;
	int ret;

	init_data.devicename = pci_name(npem->dev);
	init_data.default_label = nled->indication->name;

	ret = led_compose_name(&npem->dev->dev, &init_data, name);
	if (ret)
		return ret;

	led->name = name;
	led->brightness_set_blocking = brightness_set;
	led->brightness_get = brightness_get;
	led->max_brightness = 1;
	led->default_trigger = "none";
	led->flags = 0;

	ret = led_classdev_register(&npem->dev->dev, led);
	if (ret)
		/* Clear the name to indicate that it is not registered. */
		name[0] = 0;
	return ret;
}

static int pci_npem_init(struct pci_dev *dev, const struct npem_ops *ops,
			 int pos, u32 caps)
{
	u32 supported = reg_to_indications(caps, ops->inds);
	int supported_cnt = hweight32(supported);
	const struct indication *indication;
	struct npem_led *nled;
	struct npem *npem;
	int led_idx = 0;
	int ret;

	npem = kzalloc(struct_size(npem, leds, supported_cnt), GFP_KERNEL);
	if (!npem)
		return -ENOMEM;

	npem->supported_indications = supported;
	npem->led_cnt = supported_cnt;
	npem->pos = pos;
	npem->dev = dev;
	npem->ops = ops;

	mutex_init(&npem->lock);

	for_each_indication(indication, npem_indications) {
		if (!(npem->supported_indications & indication->bit))
			continue;

		nled = &npem->leds[led_idx++];
		nled->indication = indication;
		nled->npem = npem;

		ret = pci_npem_set_led_classdev(npem, nled);
		if (ret) {
			npem_free(npem);
			return ret;
		}
	}

	dev->npem = npem;
	return 0;
}

void pci_npem_remove(struct pci_dev *dev)
{
	npem_free(dev->npem);
}

void pci_npem_create(struct pci_dev *dev)
{
	const struct npem_ops *ops = &npem_ops;
	int pos = 0, ret;
	u32 cap;

	if (npem_has_dsm(dev)) {
		/*
		 * OS should use the DSM for LED control if it is available
		 * PCI Firmware Spec r3.3 sec 4.7.
		 */
		ret = dsm_get(dev, GET_SUPPORTED_STATES_DSM, &cap);
		if (ret)
			return;

		ops = &dsm_ops;
	} else {
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_NPEM);
		if (pos == 0)
			return;

		if (pci_read_config_dword(dev, pos + PCI_NPEM_CAP, &cap) != 0 ||
		    (cap & PCI_NPEM_CAP_CAPABLE) == 0)
			return;
	}

	pci_info(dev, "Configuring %s\n", ops->name);

	ret = pci_npem_init(dev, ops, pos, cap);
	if (ret)
		pci_err(dev, "Failed to register %s, err: %d\n", ops->name,
			ret);
}
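For context only, not part of the merge above: each supported indication ends up as a one-bit LED class device (max_brightness = 1), named by led_compose_name() from pci_name(dev) plus the indication label, so userspace toggles it through the ordinary /sys/class/leds interface. The sketch below is a hedged illustration; the PCI address and the "enclosure:locate" label are assumed placeholders, not values taken from the diff.

#include <stdio.h>

int main(void)
{
	/* Assumed LED name; list /sys/class/leds/ on the target to find the real one. */
	const char *path =
		"/sys/class/leds/0000:02:00.0:enclosure:locate/brightness";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* max_brightness is 1, so "1" asserts the indication and "0" clears it. */
	fputs("1", f);
	fclose(f);
	return 0;
}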
@@ -257,8 +257,8 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
 		 */
 		.rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
 		       PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
-		       PCI_EXP_RTCTL_CRSSVE),
-		.ro = PCI_EXP_RTCAP_CRSVIS << 16,
+		       PCI_EXP_RTCTL_RRS_SVE),
+		.ro = PCI_EXP_RTCAP_RRS_SV << 16,
 	},

 	[PCI_EXP_RTSTA / 4] = {
@@ -1670,7 +1670,7 @@ static void pci_dma_cleanup(struct device *dev)
 	iommu_device_unuse_default_domain(dev);
 }

-struct bus_type pci_bus_type = {
+const struct bus_type pci_bus_type = {
 	.name		= "pci",
 	.match		= pci_bus_match,
 	.uevent		= pci_uevent,
@@ -31,6 +31,10 @@
 #include <linux/aperture.h>
 #include "pci.h"

+#ifndef ARCH_PCI_DEV_GROUPS
+#define ARCH_PCI_DEV_GROUPS
+#endif
+
 static int sysfs_initialized;	/* = 0 */

 /* show configuration fields */
@@ -1624,6 +1628,7 @@ const struct attribute_group *pci_dev_groups[] = {
 	&pci_dev_acpi_attr_group,
 #endif
 	&pci_dev_resource_resize_group,
+	ARCH_PCI_DEV_GROUPS
 	NULL,
 };

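Side note, illustration only: the two pci-sysfs.c hunks above let an architecture splice extra sysfs attribute groups into pci_dev_groups[] by defining ARCH_PCI_DEV_GROUPS (for example in its asm/pci.h) before the fallback empty definition is used. The macro must expand to a comma-terminated list of group pointers so it can sit directly before the NULL terminator; the group name below is made up, not the real s390 one.

#define ARCH_PCI_DEV_GROUPS &my_arch_pci_attr_group,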
@@ -1283,7 +1283,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
 {
 	int delay = 1;
 	bool retrain = false;
-	struct pci_dev *bridge;
+	struct pci_dev *root, *bridge;
+
+	root = pcie_find_root_port(dev);

 	if (pci_is_pcie(dev)) {
 		bridge = pci_upstream_bridge(dev);
@@ -1292,16 +1294,23 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
 	}

 	/*
-	 * After reset, the device should not silently discard config
-	 * requests, but it may still indicate that it needs more time by
-	 * responding to them with CRS completions. The Root Port will
-	 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
-	 * the read (except when CRS SV is enabled and the read was for the
-	 * Vendor ID; in that case it synthesizes 0x0001 data).
+	 * The caller has already waited long enough after a reset that the
+	 * device should respond to config requests, but it may respond
+	 * with Request Retry Status (RRS) if it needs more time to
+	 * initialize.
 	 *
-	 * Wait for the device to return a non-CRS completion. Read the
-	 * Command register instead of Vendor ID so we don't have to
-	 * contend with the CRS SV value.
+	 * If the device is below a Root Port with Configuration RRS
+	 * Software Visibility enabled, reading the Vendor ID returns a
+	 * special data value if the device responded with RRS. Read the
+	 * Vendor ID until we get non-RRS status.
+	 *
+	 * If there's no Root Port or Configuration RRS Software Visibility
+	 * is not enabled, the device may still respond with RRS, but
+	 * hardware may retry the config request. If no retries receive
+	 * Successful Completion, hardware generally synthesizes ~0
+	 * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor
+	 * ID for VFs and non-existent devices also returns ~0, so read the
+	 * Command register until it returns something other than ~0.
 	 */
 	for (;;) {
 		u32 id;
@@ -1311,9 +1320,15 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
 			return -ENOTTY;
 		}

-		pci_read_config_dword(dev, PCI_COMMAND, &id);
-		if (!PCI_POSSIBLE_ERROR(id))
-			break;
+		if (root && root->config_rrs_sv) {
+			pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
+			if (!pci_bus_rrs_vendor_id(id))
+				break;
+		} else {
+			pci_read_config_dword(dev, PCI_COMMAND, &id);
+			if (!PCI_POSSIBLE_ERROR(id))
+				break;
+		}

 		if (delay > timeout) {
 			pci_warn(dev, "not ready %dms after %s; giving up\n",
@@ -1324,7 +1339,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
 		if (delay > PCI_RESET_WAIT) {
 			if (retrain) {
 				retrain = false;
-				if (pcie_failed_link_retrain(bridge)) {
+				if (pcie_failed_link_retrain(bridge) == 0) {
 					delay = 1;
 					continue;
 				}
@@ -4718,7 +4733,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
 		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
 	}

-	return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+
+	/*
+	 * Clear LBMS after a manual retrain so that the bit can be used
+	 * to track link speed or width changes made by hardware itself
+	 * in attempt to correct unreliable link operation.
+	 */
+	pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+	return rc;
 }

 /**
@@ -5672,8 +5695,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus)

 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		pci_dev_restore(dev);
-		if (dev->subordinate)
+		if (dev->subordinate) {
+			pci_bridge_wait_for_secondary_bus(dev, "bus reset");
 			pci_bus_restore_locked(dev->subordinate);
+		}
 	}
 }

@@ -5707,8 +5732,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
 		if (!dev->slot || dev->slot != slot)
 			continue;
 		pci_dev_restore(dev);
-		if (dev->subordinate)
+		if (dev->subordinate) {
+			pci_bridge_wait_for_secondary_bus(dev, "slot reset");
 			pci_bus_restore_locked(dev->subordinate);
+		}
 	}
 }

@@ -6802,16 +6829,16 @@ static int of_pci_bus_find_domain_nr(struct device *parent)
 	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
 }

-static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
 {
-	if (bus->domain_nr < 0)
+	if (domain_nr < 0)
 		return;

 	/* Release domain from IDA where it was allocated. */
-	if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
-		ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
+	if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
+		ida_free(&pci_domain_nr_static_ida, domain_nr);
 	else
-		ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
+		ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
 }

 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
@@ -6820,11 +6847,11 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
 		acpi_pci_bus_find_domain_nr(bus);
 }

-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
 {
 	if (!acpi_disabled)
 		return;
-	of_pci_bus_release_domain_nr(bus, parent);
+	of_pci_bus_release_domain_nr(parent, domain_nr);
 }
 #endif

@@ -13,9 +13,24 @@

 #define PCIE_LINK_RETRAIN_TIMEOUT_MS	1000

-/* Power stable to PERST# inactive from PCIe card Electromechanical Spec */
+/*
+ * Power stable to PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PVPERL".
+ */
 #define PCIE_T_PVPERL_MS		100

+/*
+ * REFCLK stable before PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PERST-CLK".
+ */
+#define PCIE_T_PERST_CLK_US		100
+
 /*
  * End of conventional reset (PERST# de-asserted) to first configuration
  * request (device able to respond with a "Request Retry Status" completion),
@@ -124,7 +139,6 @@ void pcie_clear_device_status(struct pci_dev *dev);
 void pcie_clear_root_pme_status(struct pci_dev *dev);
 bool pci_check_pme_status(struct pci_dev *dev);
 void pci_pme_wakeup_bus(struct pci_bus *bus);
-int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
 void pci_pme_restore(struct pci_dev *dev);
 bool pci_dev_need_resume(struct pci_dev *dev);
 void pci_dev_adjust_pme(struct pci_dev *dev);
@@ -139,6 +153,11 @@ bool pci_bridge_d3_possible(struct pci_dev *dev);
 void pci_bridge_d3_update(struct pci_dev *dev);
 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);

+static inline bool pci_bus_rrs_vendor_id(u32 l)
+{
+	return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
+}
+
 static inline void pci_wakeup_event(struct pci_dev *dev)
 {
 	/* Wait 100 ms before the system can be put into a sleep state. */
@@ -169,7 +188,6 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev)
 }

 void pci_vpd_init(struct pci_dev *dev);
-void pci_vpd_release(struct pci_dev *dev);
 extern const struct attribute_group pci_dev_vpd_attr_group;

 /* PCI Virtual Channel */
@@ -290,10 +308,10 @@ void pci_put_host_bridge_device(struct device *dev);

 int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
-				int crs_timeout);
+				int rrs_timeout);
 bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
-				int crs_timeout);
-int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
+				int rrs_timeout);
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);

 int pci_setup_device(struct pci_dev *dev);
 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -398,6 +416,14 @@ static inline void pci_doe_destroy(struct pci_dev *pdev) { }
 static inline void pci_doe_disconnected(struct pci_dev *pdev) { }
 #endif

+#ifdef CONFIG_PCI_NPEM
+void pci_npem_create(struct pci_dev *dev);
+void pci_npem_remove(struct pci_dev *dev);
+#else
+static inline void pci_npem_create(struct pci_dev *dev) { }
+static inline void pci_npem_remove(struct pci_dev *dev) { }
+#endif
+
 /**
  * pci_dev_set_io_state - Set the new error state if possible.
  *
@@ -606,7 +632,7 @@ void pci_acs_init(struct pci_dev *dev);
 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
 int pci_dev_specific_enable_acs(struct pci_dev *dev);
 int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
-bool pcie_failed_link_retrain(struct pci_dev *dev);
+int pcie_failed_link_retrain(struct pci_dev *dev);
 #else
 static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
 					       u16 acs_flags)
@@ -621,9 +647,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
 {
 	return -ENOTTY;
 }
-static inline bool pcie_failed_link_retrain(struct pci_dev *dev)
+static inline int pcie_failed_link_retrain(struct pci_dev *dev)
 {
-	return false;
+	return -ENOTTY;
 }
 #endif

@@ -887,8 +913,6 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
 #endif

 int pcim_intx(struct pci_dev *dev, int enable);
-
-int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
 int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
 				  const char *name);
 void pcim_release_region(struct pci_dev *pdev, int bar);
@@ -430,7 +430,7 @@ static int aer_inject(struct aer_error_inj *einj)
 		else
 			rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
 		rperr->source_id &= 0xffff0000;
-		rperr->source_id |= (einj->bus << 8) | devfn;
+		rperr->source_id |= PCI_DEVID(einj->bus, devfn);
 	}
 	if (einj->uncor_status) {
 		if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
@@ -443,7 +443,7 @@ static int aer_inject(struct aer_error_inj *einj)
 		rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
 		rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
 		rperr->source_id &= 0x0000ffff;
-		rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+		rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
 	}
 	spin_unlock_irqrestore(&inject_lock, flags);

@@ -1061,7 +1061,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)

 free:
 #ifdef CONFIG_PCI_DOMAINS_GENERIC
-	pci_bus_release_domain_nr(bus, parent);
+	pci_bus_release_domain_nr(parent, bus->domain_nr);
 #endif
 	kfree(bus);
 	return err;
@@ -1203,15 +1203,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 }
 EXPORT_SYMBOL(pci_add_new_bus);

-static void pci_enable_crs(struct pci_dev *pdev)
+static void pci_enable_rrs_sv(struct pci_dev *pdev)
 {
 	u16 root_cap = 0;

-	/* Enable CRS Software Visibility if supported */
+	/* Enable Configuration RRS Software Visibility if supported */
 	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
-	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+	if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
 		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
-					 PCI_EXP_RTCTL_CRSSVE);
+					 PCI_EXP_RTCTL_RRS_SVE);
+		pdev->config_rrs_sv = 1;
+	}
 }

 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
@@ -1326,7 +1328,7 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

-	pci_enable_crs(dev);
+	pci_enable_rrs_sv(dev);

 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
 	    !is_cardbus && !broken) {
@@ -2343,28 +2345,23 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_alloc_dev);

-static bool pci_bus_crs_vendor_id(u32 l)
-{
-	return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
-}
-
-static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
+static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
 			     int timeout)
 {
 	int delay = 1;

-	if (!pci_bus_crs_vendor_id(*l))
-		return true;	/* not a CRS completion */
+	if (!pci_bus_rrs_vendor_id(*l))
+		return true;	/* not a Configuration RRS completion */

 	if (!timeout)
-		return false;	/* CRS, but caller doesn't want to wait */
+		return false;	/* RRS, but caller doesn't want to wait */

 	/*
 	 * We got the reserved Vendor ID that indicates a completion with
-	 * Configuration Request Retry Status (CRS). Retry until we get a
+	 * Configuration Request Retry Status (RRS). Retry until we get a
 	 * valid Vendor ID or we time out.
 	 */
-	while (pci_bus_crs_vendor_id(*l)) {
+	while (pci_bus_rrs_vendor_id(*l)) {
 		if (delay > timeout) {
 			pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
 				pci_domain_nr(bus), bus->number,
@@ -2403,8 +2400,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
 	    *l == 0x0000ffff || *l == 0xffff0000)
 		return false;

-	if (pci_bus_crs_vendor_id(*l))
-		return pci_bus_wait_crs(bus, devfn, l, timeout);
+	if (pci_bus_rrs_vendor_id(*l))
+		return pci_bus_wait_rrs(bus, devfn, l, timeout);

 	return true;
 }
@@ -2593,6 +2590,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
 	dev->match_driver = false;
 	ret = device_add(&dev->dev);
 	WARN_ON(ret < 0);
+
+	pci_npem_create(dev);
 }

 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
@@ -66,6 +66,11 @@ static const struct of_device_id pci_pwrctl_pwrseq_of_match[] = {
 		.compatible = "pci17cb,1101",
 		.data = "wlan",
 	},
+	{
+		/* ATH11K in WCN6855 package. */
+		.compatible = "pci17cb,1103",
+		.data = "wlan",
+	},
 	{
 		/* ATH12K in WCN7850 package. */
 		.compatible = "pci17cb,1107",
@@ -66,7 +66,7 @@
  * apply this erratum workaround to any downstream ports as long as they
  * support Link Active reporting and have the Link Control 2 register.
  * Restrict the speed to 2.5GT/s then with the Target Link Speed field,
- * request a retrain and wait 200ms for the data link to go up.
+ * request a retrain and check the result.
  *
  * If this turns out successful and we know by the Vendor:Device ID it is
  * safe to do so, then lift the restriction, letting the devices negotiate
@@ -74,33 +74,45 @@
  * firmware may have already arranged and lift it with ports that already
  * report their data link being up.
  *
- * Return TRUE if the link has been successfully retrained, otherwise FALSE.
+ * Otherwise revert the speed to the original setting and request a retrain
+ * again to remove any residual state, ignoring the result as it's supposed
+ * to fail anyway.
+ *
+ * Return 0 if the link has been successfully retrained. Return an error
+ * if retraining was not needed or we attempted a retrain and it failed.
  */
-bool pcie_failed_link_retrain(struct pci_dev *dev)
+int pcie_failed_link_retrain(struct pci_dev *dev)
 {
 	static const struct pci_device_id ids[] = {
 		{ PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */
 		{}
 	};
 	u16 lnksta, lnkctl2;
+	int ret = -ENOTTY;

 	if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) ||
 	    !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
-		return false;
+		return ret;

 	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
 	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
 	if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) ==
 	    PCI_EXP_LNKSTA_LBMS) {
+		u16 oldlnkctl2 = lnkctl2;
+
 		pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");

 		lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
 		lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT;
 		pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);

-		if (pcie_retrain_link(dev, false)) {
+		ret = pcie_retrain_link(dev, false);
+		if (ret) {
 			pci_info(dev, "retraining failed\n");
-			return false;
+			pcie_capability_write_word(dev, PCI_EXP_LNKCTL2,
+						   oldlnkctl2);
+			pcie_retrain_link(dev, true);
+			return ret;
 		}

 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
@@ -117,13 +129,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev)
 		lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS;
 		pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);

-		if (pcie_retrain_link(dev, false)) {
+		ret = pcie_retrain_link(dev, false);
+		if (ret) {
 			pci_info(dev, "retraining failed\n");
-			return false;
+			return ret;
 		}
 	}

-	return true;
+	return ret;
 }

 static ktime_t fixup_debug_start(struct pci_dev *dev,
@@ -3608,6 +3621,8 @@ DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
 			quirk_broken_intx_masking);
 DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */
 			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2,
+			quirk_broken_intx_masking);

 /*
  * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
@@ -4246,6 +4261,10 @@ static void quirk_dma_func0_alias(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);

+/* Some Glenfly chips use function 0 as the PCIe Requester ID for DMA */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d40, quirk_dma_func0_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d41, quirk_dma_func0_alias);
+
 static void quirk_dma_func1_alias(struct pci_dev *dev)
 {
 	if (PCI_FUNC(dev->devfn) != 1)
@@ -5070,6 +5089,8 @@ static const struct pci_dev_acs_enabled {
 	/* QCOM QDF2xxx root ports */
 	{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
 	{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
+	/* QCOM SA8775P root port */
+	{ PCI_VENDOR_ID_QCOM, 0x0115, pci_quirk_qcom_rp_acs },
 	/* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */
 	{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
 	/* Intel PCH root ports */
Some files were not shown because too many files have changed in this diff.