Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-19 12:00:00 +00:00)
Merge branch 'pci/controller/microchip'
- Move PLDA XpressRICH generic DT binding properties to
  plda,xpressrich3-axi-common.yaml where they can be shared across
  PLDA-based drivers (Minda Chen)

- Create a drivers/pci/controller/plda/ directory for PLDA-based drivers
  and move pcie-microchip-host.c there (Minda Chen)

- Move PLDA generic macros to pcie-plda.h where they can be shared across
  drivers (Minda Chen)

- Extract PLDA generic structures from pcie-microchip-host.c, rename them
  to be generic, and move them to pcie-plda-host.c where they can be
  shared across drivers (Minda Chen)

- Add a .request_event_irq() callback for requesting device-specific
  interrupts in addition to PLDA-generic interrupts (Minda Chen)

- Add DT binding and driver for the StarFive JH7110 SoC, based on PLDA IP
  (Minda Chen)

* pci/controller/microchip:
  PCI: starfive: Add JH7110 PCIe controller
  dt-bindings: PCI: Add StarFive JH7110 PCIe controller
  PCI: Add PCIE_RESET_CONFIG_DEVICE_WAIT_MS waiting time value
  PCI: plda: Pass pci_host_bridge to plda_pcie_setup_iomems()
  PCI: plda: Add host init/deinit and map bus functions
  PCI: plda: Add event bitmap field to struct plda_pcie_rp
  PCI: microchip: Move IRQ functions to pcie-plda-host.c
  PCI: microchip: Add event irqchip field to host port and add PLDA irqchip
  PCI: microchip: Add get_events() callback and PLDA get_event()
  PCI: microchip: Add INTx and MSI event num to struct plda_event
  PCI: microchip: Add request_event_irq() callback function
  PCI: microchip: Add num_events field to struct plda_pcie_rp
  PCI: microchip: Rename interrupt related functions
  PCI: microchip: Move PLDA functions to pcie-plda-host.c
  PCI: microchip: Rename PLDA functions to be generic
  PCI: microchip: Move PLDA structures to plda-pcie.h
  PCI: microchip: Rename PLDA structures to be generic
  PCI: microchip: Add bridge_addr field to struct mc_pcie
  PCI: microchip: Move PLDA IP register macros to pcie-plda.h
  PCI: microchip: Move pcie-microchip-host.c to PLDA directory
  dt-bindings: PCI: Add PLDA XpressRICH PCIe host common properties

# Conflicts:
#	drivers/pci/pci.h
commit 325b9a3e4e
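
After this refactor, pcie-plda-host.c acts as a small shared library: an SoC driver embeds struct plda_pcie_rp, optionally supplies its own event callbacks, and lets plda_init_interrupts() build the event/INTx/MSI IRQ domains and chain the handlers. A minimal sketch of that pattern, modelled on the Microchip conversion in the diff below; the "foo" driver, its probe flow, and the choice of generic event numbers are illustrative assumptions, not code from this merge:

/* Hypothetical PLDA-based host driver skeleton -- illustration only. */
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/platform_device.h>

#include "pcie-plda.h"

struct foo_pcie {
	struct plda_pcie_rp plda;	/* shared PLDA root-port state */
	/* SoC-specific clock, reset and PHY handles would live here */
};

static const struct plda_event foo_event = {
	/* No .request_event_irq(): the core falls back to its own handler */
	.intx_event = EVENT_PM_MSI_INT_INTX,	/* generic events from pcie-plda.h */
	.msi_event  = EVENT_PM_MSI_INT_MSI,
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_pcie *foo;
	int ret;

	foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->plda.dev = dev;
	foo->plda.bridge_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(foo->plda.bridge_addr))
		return PTR_ERR(foo->plda.bridge_addr);

	/* Assumption: only the generic PLDA interrupt events are wired up */
	foo->plda.num_events = PLDA_INT_EVENT_NUM;
	foo->plda.events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0);

	/* Shared code creates the IRQ domains and chains the handlers */
	ret = plda_init_interrupts(pdev, &foo->plda, &foo_event);
	if (ret)
		return ret;

	/* ... SoC-specific link bring-up and host bridge registration ... */
	return 0;
}
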
@@ -10,21 +10,13 @@ maintainers:
  - Daire McNamara <daire.mcnamara@microchip.com>

allOf:
  - $ref: /schemas/pci/pci-host-bridge.yaml#
  - $ref: plda,xpressrich3-axi-common.yaml#
  - $ref: /schemas/interrupt-controller/msi-controller.yaml#

properties:
  compatible:
    const: microchip,pcie-host-1.0 # PolarFire

  reg:
    maxItems: 2

  reg-names:
    items:
      - const: cfg
      - const: apb

  clocks:
    description:
      Fabric Interface Controllers, FICs, are the interface between the FPGA
@@ -52,18 +44,6 @@ properties:
      items:
        pattern: '^fic[0-3]$'

  interrupts:
    minItems: 1
    items:
      - description: PCIe host controller
      - description: builtin MSI controller

  interrupt-names:
    minItems: 1
    items:
      - const: pcie
      - const: msi

  ranges:
    minItems: 1
    maxItems: 3
@@ -72,39 +52,6 @@
    minItems: 1
    maxItems: 6

  msi-controller:
    description: Identifies the node as an MSI controller.

  msi-parent:
    description: MSI controller the device is capable of using.

  interrupt-controller:
    type: object
    properties:
      '#address-cells':
        const: 0

      '#interrupt-cells':
        const: 1

      interrupt-controller: true

    required:
      - '#address-cells'
      - '#interrupt-cells'
      - interrupt-controller

    additionalProperties: false

required:
  - reg
  - reg-names
  - "#interrupt-cells"
  - interrupts
  - interrupt-map-mask
  - interrupt-map
  - msi-controller

unevaluatedProperties: false

examples:
Documentation/devicetree/bindings/pci/plda,xpressrich3-axi-common.yaml (new file, 75 lines)
@@ -0,0 +1,75 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/pci/plda,xpressrich3-axi-common.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: PLDA XpressRICH PCIe host common properties

maintainers:
  - Daire McNamara <daire.mcnamara@microchip.com>
  - Kevin Xie <kevin.xie@starfivetech.com>

description:
  Generic PLDA XpressRICH PCIe host common properties.

allOf:
  - $ref: /schemas/pci/pci-host-bridge.yaml#

properties:
  reg:
    maxItems: 2

  reg-names:
    items:
      - const: cfg
      - const: apb

  interrupts:
    minItems: 1
    items:
      - description: PCIe host controller
      - description: builtin MSI controller

  interrupt-names:
    minItems: 1
    items:
      - const: pcie
      - const: msi

  msi-controller:
    description: Identifies the node as an MSI controller.

  msi-parent:
    description: MSI controller the device is capable of using.

  interrupt-controller:
    type: object
    properties:
      '#address-cells':
        const: 0

      '#interrupt-cells':
        const: 1

      interrupt-controller: true

    required:
      - '#address-cells'
      - '#interrupt-cells'
      - interrupt-controller

    additionalProperties: false

required:
  - reg
  - reg-names
  - interrupts
  - msi-controller
  - "#interrupt-cells"
  - interrupt-map-mask
  - interrupt-map

additionalProperties: true

...
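
The two register regions this schema names map directly onto the shared driver state introduced in this series: "apb" is the PLDA bridge/controller register space that plda_pcie_rp::bridge_addr points at (the Microchip driver additionally applies a bridge offset inside it), and "cfg" is the ECAM-style configuration space indexed by plda_pcie_map_bus() through plda_pcie_rp::config_base. A hedged probe-side sketch of the lookup; the function name and the by-name lookups are assumptions, and the in-tree drivers may map these regions by index instead:

#include <linux/err.h>
#include <linux/platform_device.h>

#include "pcie-plda.h"

/* Illustration only: map the "cfg"/"apb" regions named by the binding. */
static int foo_pcie_map_regions(struct platform_device *pdev,
				struct plda_pcie_rp *port)
{
	/* "apb": bridge registers used by the shared IRQ/MSI/ATR helpers */
	port->bridge_addr = devm_platform_ioremap_resource_byname(pdev, "apb");
	if (IS_ERR(port->bridge_addr))
		return PTR_ERR(port->bridge_addr);

	/* "cfg": config space accessed through plda_pcie_map_bus() */
	port->config_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
	if (IS_ERR(port->config_base))
		return PTR_ERR(port->config_base);

	return 0;
}
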
Documentation/devicetree/bindings/pci/starfive,jh7110-pcie.yaml (new file, 120 lines)
@@ -0,0 +1,120 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/pci/starfive,jh7110-pcie.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: StarFive JH7110 PCIe host controller

maintainers:
  - Kevin Xie <kevin.xie@starfivetech.com>

allOf:
  - $ref: plda,xpressrich3-axi-common.yaml#

properties:
  compatible:
    const: starfive,jh7110-pcie

  clocks:
    items:
      - description: NOC bus clock
      - description: Transport layer clock
      - description: AXI MST0 clock
      - description: APB clock

  clock-names:
    items:
      - const: noc
      - const: tl
      - const: axi_mst0
      - const: apb

  resets:
    items:
      - description: AXI MST0 reset
      - description: AXI SLAVE0 reset
      - description: AXI SLAVE reset
      - description: PCIE BRIDGE reset
      - description: PCIE CORE reset
      - description: PCIE APB reset

  reset-names:
    items:
      - const: mst0
      - const: slv0
      - const: slv
      - const: brg
      - const: core
      - const: apb

  starfive,stg-syscon:
    $ref: /schemas/types.yaml#/definitions/phandle-array
    description:
      The phandle to System Register Controller syscon node.

  perst-gpios:
    description: GPIO controlled connection to PERST# signal
    maxItems: 1

  phys:
    description:
      Specified PHY is attached to PCIe controller.
    maxItems: 1

required:
  - clocks
  - resets
  - starfive,stg-syscon

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/gpio/gpio.h>
    soc {
        #address-cells = <2>;
        #size-cells = <2>;

        pcie@940000000 {
            compatible = "starfive,jh7110-pcie";
            reg = <0x9 0x40000000 0x0 0x10000000>,
                  <0x0 0x2b000000 0x0 0x1000000>;
            reg-names = "cfg", "apb";
            #address-cells = <3>;
            #size-cells = <2>;
            #interrupt-cells = <1>;
            device_type = "pci";
            ranges = <0x82000000 0x0 0x30000000 0x0 0x30000000 0x0 0x08000000>,
                     <0xc3000000 0x9 0x00000000 0x9 0x00000000 0x0 0x40000000>;
            starfive,stg-syscon = <&stg_syscon>;
            bus-range = <0x0 0xff>;
            interrupt-parent = <&plic>;
            interrupts = <56>;
            interrupt-map-mask = <0x0 0x0 0x0 0x7>;
            interrupt-map = <0x0 0x0 0x0 0x1 &pcie_intc0 0x1>,
                            <0x0 0x0 0x0 0x2 &pcie_intc0 0x2>,
                            <0x0 0x0 0x0 0x3 &pcie_intc0 0x3>,
                            <0x0 0x0 0x0 0x4 &pcie_intc0 0x4>;
            msi-controller;
            clocks = <&syscrg 86>,
                     <&stgcrg 10>,
                     <&stgcrg 8>,
                     <&stgcrg 9>;
            clock-names = "noc", "tl", "axi_mst0", "apb";
            resets = <&stgcrg 11>,
                     <&stgcrg 12>,
                     <&stgcrg 13>,
                     <&stgcrg 14>,
                     <&stgcrg 15>,
                     <&stgcrg 16>;
            perst-gpios = <&gpios 26 GPIO_ACTIVE_LOW>;
            phys = <&pciephy0>;

            pcie_intc0: interrupt-controller {
                #address-cells = <0>;
                #interrupt-cells = <1>;
                interrupt-controller;
            };
        };
    };
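
The perst-gpios property in the example above hands the driver control of the slot's PERST# fundamental reset. A hedged sketch of how a host driver typically drives it with the GPIO descriptor API before touching the downstream device; the helper name and the delay values are assumptions, and the PCIE_RESET_CONFIG_DEVICE_WAIT_MS constant mentioned in the shortlog is not visible in this diff, so a plain 100 ms stands in for it here:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Illustration only: toggle PERST# as described by perst-gpios. */
static int foo_pcie_toggle_perst(struct device *dev)
{
	struct gpio_desc *perst;

	/* DT marks the line GPIO_ACTIVE_LOW, so logical 1 asserts PERST# */
	perst = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(perst))
		return PTR_ERR(perst);

	/*
	 * Hold the device in reset while clocks/PHY come up (duration is
	 * an assumption), then release PERST#.
	 */
	msleep(1);
	gpiod_set_value_cansleep(perst, 0);

	/*
	 * Give the device time to become configuration-ready; 100 ms here
	 * is a placeholder for the wait value added elsewhere in this
	 * series.
	 */
	msleep(100);

	return 0;
}
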
MAINTAINERS (19 lines changed)
@@ -17219,6 +17219,14 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
F: drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c

PCI DRIVER FOR PLDA PCIE IP
M: Daire McNamara <daire.mcnamara@microchip.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/plda,xpressrich3-axi-common.yaml
F: drivers/pci/controller/plda/pcie-plda-host.c
F: drivers/pci/controller/plda/pcie-plda.h

PCI DRIVER FOR RENESAS R-CAR
M: Marek Vasut <marek.vasut+renesas@gmail.com>
M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
@@ -17449,7 +17457,7 @@ M: Daire McNamara <daire.mcnamara@microchip.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/microchip*
F: drivers/pci/controller/*microchip*
F: drivers/pci/controller/plda/*microchip*

PCIE DRIVER FOR QUALCOMM MSM
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@@ -17479,6 +17487,13 @@ L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/controller/dwc/*spear*

PCIE DRIVER FOR STARFIVE JH71x0
M: Kevin Xie <kevin.xie@starfivetech.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/starfive,jh7110-pcie.yaml
F: drivers/pci/controller/plda/pcie-starfive.c

PCIE ENDPOINT DRIVER FOR QUALCOMM
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
@@ -19285,7 +19300,7 @@ F: drivers/clk/microchip/clk-mpfs*.c
F: drivers/firmware/microchip/mpfs-auto-update.c
F: drivers/i2c/busses/i2c-microchip-corei2c.c
F: drivers/mailbox/mailbox-mpfs.c
F: drivers/pci/controller/pcie-microchip-host.c
F: drivers/pci/controller/plda/pcie-microchip-host.c
F: drivers/pwm/pwm-microchip-core.c
F: drivers/reset/reset-mpfs.c
F: drivers/rtc/rtc-mpfs.c
@@ -215,14 +215,6 @@ config PCIE_MT7621
	help
	  This selects a driver for the MediaTek MT7621 PCIe Controller.

config PCIE_MICROCHIP_HOST
	tristate "Microchip AXI PCIe controller"
	depends on PCI_MSI && OF
	select PCI_HOST_COMMON
	help
	  Say Y here if you want kernel to support the Microchip AXI PCIe
	  Host Bridge driver.

config PCI_HYPERV_INTERFACE
	tristate "Microsoft Hyper-V PCI Interface"
	depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
@@ -356,4 +348,5 @@ config PCIE_XILINX_CPM
source "drivers/pci/controller/cadence/Kconfig"
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
source "drivers/pci/controller/plda/Kconfig"
endmenu
@@ -33,7 +33,6 @@ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
@@ -44,6 +43,7 @@ obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
obj-y += plda/


# The following drivers are for devices that use the generic ACPI
drivers/pci/controller/plda/Kconfig (new file, 30 lines)
@@ -0,0 +1,30 @@
# SPDX-License-Identifier: GPL-2.0

menu "PLDA-based PCIe controllers"
	depends on PCI

config PCIE_PLDA_HOST
	bool

config PCIE_MICROCHIP_HOST
	tristate "Microchip AXI PCIe controller"
	depends on PCI_MSI && OF
	select PCI_HOST_COMMON
	select PCIE_PLDA_HOST
	help
	  Say Y here if you want kernel to support the Microchip AXI PCIe
	  Host Bridge driver.

config PCIE_STARFIVE_HOST
	tristate "StarFive PCIe host controller"
	depends on PCI_MSI && OF
	depends on ARCH_STARFIVE || COMPILE_TEST
	select PCIE_PLDA_HOST
	help
	  Say Y here if you want to support the StarFive PCIe controller in
	  host mode. StarFive PCIe controller uses PLDA PCIe core.

	  If you choose to build this driver as module it will be dynamically
	  linked and module will be called pcie-starfive.ko.

endmenu
drivers/pci/controller/plda/Makefile (new file, 4 lines)
@@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_PLDA_HOST) += pcie-plda-host.o
obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_PCIE_STARFIVE_HOST) += pcie-starfive.o
@ -18,10 +18,8 @@
|
||||
#include <linux/pci-ecam.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include "../pci.h"
|
||||
|
||||
/* Number of MSI IRQs */
|
||||
#define MC_MAX_NUM_MSI_IRQS 32
|
||||
#include "../../pci.h"
|
||||
#include "pcie-plda.h"
|
||||
|
||||
/* PCIe Bridge Phy and Controller Phy offsets */
|
||||
#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
|
||||
@ -30,84 +28,6 @@
|
||||
#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
|
||||
#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
|
||||
|
||||
/* PCIe Bridge Phy Regs */
|
||||
#define PCIE_PCI_IRQ_DW0 0xa8
|
||||
#define MSIX_CAP_MASK BIT(31)
|
||||
#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
|
||||
#define NUM_MSI_MSGS_SHIFT 4
|
||||
|
||||
#define IMASK_LOCAL 0x180
|
||||
#define DMA_END_ENGINE_0_MASK 0x00000000u
|
||||
#define DMA_END_ENGINE_0_SHIFT 0
|
||||
#define DMA_END_ENGINE_1_MASK 0x00000000u
|
||||
#define DMA_END_ENGINE_1_SHIFT 1
|
||||
#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
|
||||
#define DMA_ERROR_ENGINE_0_SHIFT 8
|
||||
#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
|
||||
#define DMA_ERROR_ENGINE_1_SHIFT 9
|
||||
#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
|
||||
#define A_ATR_EVT_POST_ERR_SHIFT 16
|
||||
#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
|
||||
#define A_ATR_EVT_FETCH_ERR_SHIFT 17
|
||||
#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
|
||||
#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
|
||||
#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
|
||||
#define A_ATR_EVT_DOORBELL_SHIFT 19
|
||||
#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
|
||||
#define P_ATR_EVT_POST_ERR_SHIFT 20
|
||||
#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
|
||||
#define P_ATR_EVT_FETCH_ERR_SHIFT 21
|
||||
#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
|
||||
#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
|
||||
#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
|
||||
#define P_ATR_EVT_DOORBELL_SHIFT 23
|
||||
#define PM_MSI_INT_INTA_MASK 0x01000000u
|
||||
#define PM_MSI_INT_INTA_SHIFT 24
|
||||
#define PM_MSI_INT_INTB_MASK 0x02000000u
|
||||
#define PM_MSI_INT_INTB_SHIFT 25
|
||||
#define PM_MSI_INT_INTC_MASK 0x04000000u
|
||||
#define PM_MSI_INT_INTC_SHIFT 26
|
||||
#define PM_MSI_INT_INTD_MASK 0x08000000u
|
||||
#define PM_MSI_INT_INTD_SHIFT 27
|
||||
#define PM_MSI_INT_INTX_MASK 0x0f000000u
|
||||
#define PM_MSI_INT_INTX_SHIFT 24
|
||||
#define PM_MSI_INT_MSI_MASK 0x10000000u
|
||||
#define PM_MSI_INT_MSI_SHIFT 28
|
||||
#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
|
||||
#define PM_MSI_INT_AER_EVT_SHIFT 29
|
||||
#define PM_MSI_INT_EVENTS_MASK 0x40000000u
|
||||
#define PM_MSI_INT_EVENTS_SHIFT 30
|
||||
#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
|
||||
#define PM_MSI_INT_SYS_ERR_SHIFT 31
|
||||
#define NUM_LOCAL_EVENTS 15
|
||||
#define ISTATUS_LOCAL 0x184
|
||||
#define IMASK_HOST 0x188
|
||||
#define ISTATUS_HOST 0x18c
|
||||
#define IMSI_ADDR 0x190
|
||||
#define ISTATUS_MSI 0x194
|
||||
|
||||
/* PCIe Master table init defines */
|
||||
#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
|
||||
#define ATR0_PCIE_ATR_SIZE 0x25
|
||||
#define ATR0_PCIE_ATR_SIZE_SHIFT 1
|
||||
#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
|
||||
#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
|
||||
#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
|
||||
#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
|
||||
|
||||
/* PCIe AXI slave table init defines */
|
||||
#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
|
||||
#define ATR_SIZE_SHIFT 1
|
||||
#define ATR_IMPL_ENABLE 1
|
||||
#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
|
||||
#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
|
||||
#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
|
||||
#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
|
||||
#define PCIE_TX_RX_INTERFACE 0x00000000u
|
||||
#define PCIE_CONFIG_INTERFACE 0x00000001u
|
||||
|
||||
#define ATR_ENTRY_SIZE 32
|
||||
|
||||
/* PCIe Controller Phy Regs */
|
||||
#define SEC_ERROR_EVENT_CNT 0x20
|
||||
#define DED_ERROR_EVENT_CNT 0x24
|
||||
@ -179,20 +99,21 @@
|
||||
#define EVENT_LOCAL_DMA_END_ENGINE_1 12
|
||||
#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
|
||||
#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
|
||||
#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
|
||||
#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
|
||||
#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
|
||||
#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
|
||||
#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
|
||||
#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
|
||||
#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
|
||||
#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
|
||||
#define EVENT_LOCAL_PM_MSI_INT_INTX 23
|
||||
#define EVENT_LOCAL_PM_MSI_INT_MSI 24
|
||||
#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
|
||||
#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
|
||||
#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
|
||||
#define NUM_EVENTS 28
|
||||
#define NUM_MC_EVENTS 15
|
||||
#define EVENT_LOCAL_A_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_AXI_POST_ERR)
|
||||
#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_AXI_FETCH_ERR)
|
||||
#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_AXI_DISCARD_ERR)
|
||||
#define EVENT_LOCAL_A_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_AXI_DOORBELL)
|
||||
#define EVENT_LOCAL_P_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_PCIE_POST_ERR)
|
||||
#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_PCIE_FETCH_ERR)
|
||||
#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_PCIE_DISCARD_ERR)
|
||||
#define EVENT_LOCAL_P_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_PCIE_DOORBELL)
|
||||
#define EVENT_LOCAL_PM_MSI_INT_INTX (NUM_MC_EVENTS + PLDA_INTX)
|
||||
#define EVENT_LOCAL_PM_MSI_INT_MSI (NUM_MC_EVENTS + PLDA_MSI)
|
||||
#define EVENT_LOCAL_PM_MSI_INT_AER_EVT (NUM_MC_EVENTS + PLDA_AER_EVENT)
|
||||
#define EVENT_LOCAL_PM_MSI_INT_EVENTS (NUM_MC_EVENTS + PLDA_MISC_EVENTS)
|
||||
#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR (NUM_MC_EVENTS + PLDA_SYS_ERR)
|
||||
#define NUM_EVENTS (NUM_MC_EVENTS + PLDA_INT_EVENT_NUM)
|
||||
|
||||
#define PCIE_EVENT_CAUSE(x, s) \
|
||||
[EVENT_PCIE_ ## x] = { __stringify(x), s }
|
||||
@ -255,22 +176,10 @@ struct event_map {
|
||||
u32 event_bit;
|
||||
};
|
||||
|
||||
struct mc_msi {
|
||||
struct mutex lock; /* Protect used bitmap */
|
||||
struct irq_domain *msi_domain;
|
||||
struct irq_domain *dev_domain;
|
||||
u32 num_vectors;
|
||||
u64 vector_phy;
|
||||
DECLARE_BITMAP(used, MC_MAX_NUM_MSI_IRQS);
|
||||
};
|
||||
|
||||
struct mc_pcie {
|
||||
struct plda_pcie_rp plda;
|
||||
void __iomem *axi_base_addr;
|
||||
struct device *dev;
|
||||
struct irq_domain *intx_domain;
|
||||
struct irq_domain *event_domain;
|
||||
raw_spinlock_t lock;
|
||||
struct mc_msi msi;
|
||||
};
|
||||
|
||||
struct cause {
|
||||
@ -388,7 +297,7 @@ static struct mc_pcie *port;
|
||||
|
||||
static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
|
||||
{
|
||||
struct mc_msi *msi = &port->msi;
|
||||
struct plda_msi *msi = &port->plda.msi;
|
||||
u16 reg;
|
||||
u8 queue_size;
|
||||
|
||||
@ -409,246 +318,6 @@ static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
|
||||
ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
|
||||
}
|
||||
|
||||
static void mc_handle_msi(struct irq_desc *desc)
|
||||
{
|
||||
struct mc_pcie *port = irq_desc_get_handler_data(desc);
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct device *dev = port->dev;
|
||||
struct mc_msi *msi = &port->msi;
|
||||
void __iomem *bridge_base_addr =
|
||||
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
unsigned long status;
|
||||
u32 bit;
|
||||
int ret;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
|
||||
if (status & PM_MSI_INT_MSI_MASK) {
|
||||
writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
|
||||
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
|
||||
for_each_set_bit(bit, &status, msi->num_vectors) {
|
||||
ret = generic_handle_domain_irq(msi->dev_domain, bit);
|
||||
if (ret)
|
||||
dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
|
||||
bit);
|
||||
}
|
||||
}
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void mc_msi_bottom_irq_ack(struct irq_data *data)
|
||||
{
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(data);
|
||||
void __iomem *bridge_base_addr =
|
||||
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
u32 bitpos = data->hwirq;
|
||||
|
||||
writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
|
||||
}
|
||||
|
||||
static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
{
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(data);
|
||||
phys_addr_t addr = port->msi.vector_phy;
|
||||
|
||||
msg->address_lo = lower_32_bits(addr);
|
||||
msg->address_hi = upper_32_bits(addr);
|
||||
msg->data = data->hwirq;
|
||||
|
||||
dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
|
||||
(int)data->hwirq, msg->address_hi, msg->address_lo);
|
||||
}
|
||||
|
||||
static int mc_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct irq_chip mc_msi_bottom_irq_chip = {
|
||||
.name = "Microchip MSI",
|
||||
.irq_ack = mc_msi_bottom_irq_ack,
|
||||
.irq_compose_msi_msg = mc_compose_msi_msg,
|
||||
.irq_set_affinity = mc_msi_set_affinity,
|
||||
};
|
||||
|
||||
static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
unsigned int nr_irqs, void *args)
|
||||
{
|
||||
struct mc_pcie *port = domain->host_data;
|
||||
struct mc_msi *msi = &port->msi;
|
||||
unsigned long bit;
|
||||
|
||||
mutex_lock(&msi->lock);
|
||||
bit = find_first_zero_bit(msi->used, msi->num_vectors);
|
||||
if (bit >= msi->num_vectors) {
|
||||
mutex_unlock(&msi->lock);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
set_bit(bit, msi->used);
|
||||
|
||||
irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
|
||||
domain->host_data, handle_edge_irq, NULL, NULL);
|
||||
|
||||
mutex_unlock(&msi->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
|
||||
unsigned int nr_irqs)
|
||||
{
|
||||
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(d);
|
||||
struct mc_msi *msi = &port->msi;
|
||||
|
||||
mutex_lock(&msi->lock);
|
||||
|
||||
if (test_bit(d->hwirq, msi->used))
|
||||
__clear_bit(d->hwirq, msi->used);
|
||||
else
|
||||
dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
|
||||
|
||||
mutex_unlock(&msi->lock);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops msi_domain_ops = {
|
||||
.alloc = mc_irq_msi_domain_alloc,
|
||||
.free = mc_irq_msi_domain_free,
|
||||
};
|
||||
|
||||
static struct irq_chip mc_msi_irq_chip = {
|
||||
.name = "Microchip PCIe MSI",
|
||||
.irq_ack = irq_chip_ack_parent,
|
||||
.irq_mask = pci_msi_mask_irq,
|
||||
.irq_unmask = pci_msi_unmask_irq,
|
||||
};
|
||||
|
||||
static struct msi_domain_info mc_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX),
|
||||
.chip = &mc_msi_irq_chip,
|
||||
};
|
||||
|
||||
static int mc_allocate_msi_domains(struct mc_pcie *port)
|
||||
{
|
||||
struct device *dev = port->dev;
|
||||
struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
|
||||
struct mc_msi *msi = &port->msi;
|
||||
|
||||
mutex_init(&port->msi.lock);
|
||||
|
||||
msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
|
||||
&msi_domain_ops, port);
|
||||
if (!msi->dev_domain) {
|
||||
dev_err(dev, "failed to create IRQ domain\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
|
||||
msi->dev_domain);
|
||||
if (!msi->msi_domain) {
|
||||
dev_err(dev, "failed to create MSI domain\n");
|
||||
irq_domain_remove(msi->dev_domain);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mc_handle_intx(struct irq_desc *desc)
|
||||
{
|
||||
struct mc_pcie *port = irq_desc_get_handler_data(desc);
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct device *dev = port->dev;
|
||||
void __iomem *bridge_base_addr =
|
||||
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
unsigned long status;
|
||||
u32 bit;
|
||||
int ret;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
|
||||
if (status & PM_MSI_INT_INTX_MASK) {
|
||||
status &= PM_MSI_INT_INTX_MASK;
|
||||
status >>= PM_MSI_INT_INTX_SHIFT;
|
||||
for_each_set_bit(bit, &status, PCI_NUM_INTX) {
|
||||
ret = generic_handle_domain_irq(port->intx_domain, bit);
|
||||
if (ret)
|
||||
dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
|
||||
bit);
|
||||
}
|
||||
}
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void mc_ack_intx_irq(struct irq_data *data)
|
||||
{
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(data);
|
||||
void __iomem *bridge_base_addr =
|
||||
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
|
||||
|
||||
writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
|
||||
}
|
||||
|
||||
static void mc_mask_intx_irq(struct irq_data *data)
|
||||
{
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(data);
|
||||
void __iomem *bridge_base_addr =
|
||||
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
unsigned long flags;
|
||||
u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
|
||||
u32 val;
|
||||
|
||||
raw_spin_lock_irqsave(&port->lock, flags);
|
||||
val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
|
||||
val &= ~mask;
|
||||
writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
|
||||
raw_spin_unlock_irqrestore(&port->lock, flags);
|
||||
}
|
||||
|
||||
static void mc_unmask_intx_irq(struct irq_data *data)
|
||||
{
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(data);
|
||||
void __iomem *bridge_base_addr =
|
||||
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
unsigned long flags;
|
||||
u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
|
||||
u32 val;
|
||||
|
||||
raw_spin_lock_irqsave(&port->lock, flags);
|
||||
val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
|
||||
val |= mask;
|
||||
writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
|
||||
raw_spin_unlock_irqrestore(&port->lock, flags);
|
||||
}
|
||||
|
||||
static struct irq_chip mc_intx_irq_chip = {
|
||||
.name = "Microchip PCIe INTx",
|
||||
.irq_ack = mc_ack_intx_irq,
|
||||
.irq_mask = mc_mask_intx_irq,
|
||||
.irq_unmask = mc_unmask_intx_irq,
|
||||
};
|
||||
|
||||
static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
|
||||
irq_set_chip_data(irq, domain->host_data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops intx_domain_ops = {
|
||||
.map = mc_pcie_intx_map,
|
||||
};
|
||||
|
||||
static inline u32 reg_to_event(u32 reg, struct event_map field)
|
||||
{
|
||||
return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
|
||||
@ -706,21 +375,22 @@ static u32 local_events(struct mc_pcie *port)
|
||||
return val;
|
||||
}
|
||||
|
||||
static u32 get_events(struct mc_pcie *port)
|
||||
static u32 mc_get_events(struct plda_pcie_rp *port)
|
||||
{
|
||||
struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
|
||||
u32 events = 0;
|
||||
|
||||
events |= pcie_events(port);
|
||||
events |= sec_errors(port);
|
||||
events |= ded_errors(port);
|
||||
events |= local_events(port);
|
||||
events |= pcie_events(mc_port);
|
||||
events |= sec_errors(mc_port);
|
||||
events |= ded_errors(mc_port);
|
||||
events |= local_events(mc_port);
|
||||
|
||||
return events;
|
||||
}
|
||||
|
||||
static irqreturn_t mc_event_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct mc_pcie *port = dev_id;
|
||||
struct plda_pcie_rp *port = dev_id;
|
||||
struct device *dev = port->dev;
|
||||
struct irq_data *data;
|
||||
|
||||
@ -734,31 +404,15 @@ static irqreturn_t mc_event_handler(int irq, void *dev_id)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void mc_handle_event(struct irq_desc *desc)
|
||||
{
|
||||
struct mc_pcie *port = irq_desc_get_handler_data(desc);
|
||||
unsigned long events;
|
||||
u32 bit;
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
events = get_events(port);
|
||||
|
||||
for_each_set_bit(bit, &events, NUM_EVENTS)
|
||||
generic_handle_domain_irq(port->event_domain, bit);
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void mc_ack_event_irq(struct irq_data *data)
|
||||
{
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(data);
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
|
||||
u32 event = data->hwirq;
|
||||
void __iomem *addr;
|
||||
u32 mask;
|
||||
|
||||
addr = port->axi_base_addr + event_descs[event].base +
|
||||
addr = mc_port->axi_base_addr + event_descs[event].base +
|
||||
event_descs[event].offset;
|
||||
mask = event_descs[event].mask;
|
||||
mask |= event_descs[event].enb_mask;
|
||||
@ -768,13 +422,14 @@ static void mc_ack_event_irq(struct irq_data *data)
|
||||
|
||||
static void mc_mask_event_irq(struct irq_data *data)
|
||||
{
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(data);
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
|
||||
u32 event = data->hwirq;
|
||||
void __iomem *addr;
|
||||
u32 mask;
|
||||
u32 val;
|
||||
|
||||
addr = port->axi_base_addr + event_descs[event].base +
|
||||
addr = mc_port->axi_base_addr + event_descs[event].base +
|
||||
event_descs[event].mask_offset;
|
||||
mask = event_descs[event].mask;
|
||||
if (event_descs[event].enb_mask) {
|
||||
@ -798,13 +453,14 @@ static void mc_mask_event_irq(struct irq_data *data)
|
||||
|
||||
static void mc_unmask_event_irq(struct irq_data *data)
|
||||
{
|
||||
struct mc_pcie *port = irq_data_get_irq_chip_data(data);
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
|
||||
u32 event = data->hwirq;
|
||||
void __iomem *addr;
|
||||
u32 mask;
|
||||
u32 val;
|
||||
|
||||
addr = port->axi_base_addr + event_descs[event].base +
|
||||
addr = mc_port->axi_base_addr + event_descs[event].base +
|
||||
event_descs[event].mask_offset;
|
||||
mask = event_descs[event].mask;
|
||||
|
||||
@ -834,19 +490,6 @@ static struct irq_chip mc_event_irq_chip = {
|
||||
.irq_unmask = mc_unmask_event_irq,
|
||||
};
|
||||
|
||||
static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
|
||||
irq_set_chip_data(irq, domain->host_data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops event_domain_ops = {
|
||||
.map = mc_pcie_event_map,
|
||||
};
|
||||
|
||||
static inline void mc_pcie_deinit_clk(void *data)
|
||||
{
|
||||
struct clk *clk = data;
|
||||
@ -892,105 +535,22 @@ static int mc_pcie_init_clks(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mc_pcie_init_irq_domains(struct mc_pcie *port)
|
||||
static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
|
||||
int event)
|
||||
{
|
||||
struct device *dev = port->dev;
|
||||
struct device_node *node = dev->of_node;
|
||||
struct device_node *pcie_intc_node;
|
||||
|
||||
/* Setup INTx */
|
||||
pcie_intc_node = of_get_next_child(node, NULL);
|
||||
if (!pcie_intc_node) {
|
||||
dev_err(dev, "failed to find PCIe Intc node\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
|
||||
&event_domain_ops, port);
|
||||
if (!port->event_domain) {
|
||||
dev_err(dev, "failed to get event domain\n");
|
||||
of_node_put(pcie_intc_node);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
|
||||
|
||||
port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
|
||||
&intx_domain_ops, port);
|
||||
if (!port->intx_domain) {
|
||||
dev_err(dev, "failed to get an INTx IRQ domain\n");
|
||||
of_node_put(pcie_intc_node);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
|
||||
|
||||
of_node_put(pcie_intc_node);
|
||||
raw_spin_lock_init(&port->lock);
|
||||
|
||||
return mc_allocate_msi_domains(port);
|
||||
return devm_request_irq(plda->dev, event_irq, mc_event_handler,
|
||||
0, event_cause[event].sym, plda);
|
||||
}
|
||||
|
||||
static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
|
||||
phys_addr_t axi_addr, phys_addr_t pci_addr,
|
||||
size_t size)
|
||||
{
|
||||
u32 atr_sz = ilog2(size) - 1;
|
||||
u32 val;
|
||||
static const struct plda_event_ops mc_event_ops = {
|
||||
.get_events = mc_get_events,
|
||||
};
|
||||
|
||||
if (index == 0)
|
||||
val = PCIE_CONFIG_INTERFACE;
|
||||
else
|
||||
val = PCIE_TX_RX_INTERFACE;
|
||||
|
||||
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
|
||||
ATR0_AXI4_SLV0_TRSL_PARAM);
|
||||
|
||||
val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
|
||||
ATR_IMPL_ENABLE;
|
||||
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
|
||||
ATR0_AXI4_SLV0_SRCADDR_PARAM);
|
||||
|
||||
val = upper_32_bits(axi_addr);
|
||||
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
|
||||
ATR0_AXI4_SLV0_SRC_ADDR);
|
||||
|
||||
val = lower_32_bits(pci_addr);
|
||||
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
|
||||
ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
|
||||
|
||||
val = upper_32_bits(pci_addr);
|
||||
writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
|
||||
ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
|
||||
|
||||
val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
|
||||
val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
|
||||
writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
|
||||
writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
|
||||
}
|
||||
|
||||
static int mc_pcie_setup_windows(struct platform_device *pdev,
|
||||
struct mc_pcie *port)
|
||||
{
|
||||
void __iomem *bridge_base_addr =
|
||||
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
|
||||
struct resource_entry *entry;
|
||||
u64 pci_addr;
|
||||
u32 index = 1;
|
||||
|
||||
resource_list_for_each_entry(entry, &bridge->windows) {
|
||||
if (resource_type(entry->res) == IORESOURCE_MEM) {
|
||||
pci_addr = entry->res->start - entry->offset;
|
||||
mc_pcie_setup_window(bridge_base_addr, index,
|
||||
entry->res->start, pci_addr,
|
||||
resource_size(entry->res));
|
||||
index++;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
static const struct plda_event mc_event = {
|
||||
.request_event_irq = mc_request_event_irq,
|
||||
.intx_event = EVENT_LOCAL_PM_MSI_INT_INTX,
|
||||
.msi_event = EVENT_LOCAL_PM_MSI_INT_MSI,
|
||||
};
|
||||
|
||||
static inline void mc_clear_secs(struct mc_pcie *port)
|
||||
{
|
||||
@ -1052,85 +612,34 @@ static void mc_disable_interrupts(struct mc_pcie *port)
|
||||
writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
|
||||
}
|
||||
|
||||
static int mc_init_interrupts(struct platform_device *pdev, struct mc_pcie *port)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
int irq;
|
||||
int i, intx_irq, msi_irq, event_irq;
|
||||
int ret;
|
||||
|
||||
ret = mc_pcie_init_irq_domains(port);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed creating IRQ domains\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return -ENODEV;
|
||||
|
||||
for (i = 0; i < NUM_EVENTS; i++) {
|
||||
event_irq = irq_create_mapping(port->event_domain, i);
|
||||
if (!event_irq) {
|
||||
dev_err(dev, "failed to map hwirq %d\n", i);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
ret = devm_request_irq(dev, event_irq, mc_event_handler,
|
||||
0, event_cause[i].sym, port);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to request IRQ %d\n", event_irq);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
intx_irq = irq_create_mapping(port->event_domain,
|
||||
EVENT_LOCAL_PM_MSI_INT_INTX);
|
||||
if (!intx_irq) {
|
||||
dev_err(dev, "failed to map INTx interrupt\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
/* Plug the INTx chained handler */
|
||||
irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
|
||||
|
||||
msi_irq = irq_create_mapping(port->event_domain,
|
||||
EVENT_LOCAL_PM_MSI_INT_MSI);
|
||||
if (!msi_irq)
|
||||
return -ENXIO;
|
||||
|
||||
/* Plug the MSI chained handler */
|
||||
irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
|
||||
|
||||
/* Plug the main event chained handler */
|
||||
irq_set_chained_handler_and_data(irq, mc_handle_event, port);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mc_platform_init(struct pci_config_window *cfg)
|
||||
{
|
||||
struct device *dev = cfg->parent;
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
|
||||
void __iomem *bridge_base_addr =
|
||||
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
int ret;
|
||||
|
||||
/* Configure address translation table 0 for PCIe config space */
|
||||
mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
|
||||
cfg->res.start,
|
||||
resource_size(&cfg->res));
|
||||
plda_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
|
||||
cfg->res.start,
|
||||
resource_size(&cfg->res));
|
||||
|
||||
/* Need some fixups in config space */
|
||||
mc_pcie_enable_msi(port, cfg->win);
|
||||
|
||||
/* Configure non-config space outbound ranges */
|
||||
ret = mc_pcie_setup_windows(pdev, port);
|
||||
ret = plda_pcie_setup_iomems(bridge, &port->plda);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
port->plda.event_ops = &mc_event_ops;
|
||||
port->plda.event_irq_chip = &mc_event_irq_chip;
|
||||
port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
|
||||
|
||||
/* Address translation is up; safe to enable interrupts */
|
||||
ret = mc_init_interrupts(pdev, port);
|
||||
ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1141,6 +650,7 @@ static int mc_host_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
void __iomem *bridge_base_addr;
|
||||
struct plda_pcie_rp *plda;
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
@ -1148,7 +658,8 @@ static int mc_host_probe(struct platform_device *pdev)
|
||||
if (!port)
|
||||
return -ENOMEM;
|
||||
|
||||
port->dev = dev;
|
||||
plda = &port->plda;
|
||||
plda->dev = dev;
|
||||
|
||||
port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
|
||||
if (IS_ERR(port->axi_base_addr))
|
||||
@ -1157,6 +668,8 @@ static int mc_host_probe(struct platform_device *pdev)
|
||||
mc_disable_interrupts(port);
|
||||
|
||||
bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
|
||||
plda->bridge_addr = bridge_base_addr;
|
||||
plda->num_events = NUM_EVENTS;
|
||||
|
||||
/* Allow enabling MSI by disabling MSI-X */
|
||||
val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
|
||||
@ -1168,10 +681,10 @@ static int mc_host_probe(struct platform_device *pdev)
|
||||
val &= NUM_MSI_MSGS_MASK;
|
||||
val >>= NUM_MSI_MSGS_SHIFT;
|
||||
|
||||
port->msi.num_vectors = 1 << val;
|
||||
plda->msi.num_vectors = 1 << val;
|
||||
|
||||
/* Pick vector address from design */
|
||||
port->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);
|
||||
plda->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);
|
||||
|
||||
ret = mc_pcie_init_clks(dev);
|
||||
if (ret) {
|
drivers/pci/controller/plda/pcie-plda-host.c (new file, 651 lines)
@@ -0,0 +1,651 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* PLDA PCIe XpressRich host controller driver
|
||||
*
|
||||
* Copyright (C) 2023 Microchip Co. Ltd
|
||||
* StarFive Co. Ltd
|
||||
*
|
||||
* Author: Daire McNamara <daire.mcnamara@microchip.com>
|
||||
*/
|
||||
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/pci_regs.h>
|
||||
#include <linux/pci-ecam.h>
|
||||
|
||||
#include "pcie-plda.h"
|
||||
|
||||
void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
|
||||
int where)
|
||||
{
|
||||
struct plda_pcie_rp *pcie = bus->sysdata;
|
||||
|
||||
return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
|
||||
|
||||
static void plda_handle_msi(struct irq_desc *desc)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct device *dev = port->dev;
|
||||
struct plda_msi *msi = &port->msi;
|
||||
void __iomem *bridge_base_addr = port->bridge_addr;
|
||||
unsigned long status;
|
||||
u32 bit;
|
||||
int ret;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
|
||||
if (status & PM_MSI_INT_MSI_MASK) {
|
||||
writel_relaxed(status & PM_MSI_INT_MSI_MASK,
|
||||
bridge_base_addr + ISTATUS_LOCAL);
|
||||
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
|
||||
for_each_set_bit(bit, &status, msi->num_vectors) {
|
||||
ret = generic_handle_domain_irq(msi->dev_domain, bit);
|
||||
if (ret)
|
||||
dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
|
||||
bit);
|
||||
}
|
||||
}
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void plda_msi_bottom_irq_ack(struct irq_data *data)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
void __iomem *bridge_base_addr = port->bridge_addr;
|
||||
u32 bitpos = data->hwirq;
|
||||
|
||||
writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
|
||||
}
|
||||
|
||||
static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
phys_addr_t addr = port->msi.vector_phy;
|
||||
|
||||
msg->address_lo = lower_32_bits(addr);
|
||||
msg->address_hi = upper_32_bits(addr);
|
||||
msg->data = data->hwirq;
|
||||
|
||||
dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
|
||||
(int)data->hwirq, msg->address_hi, msg->address_lo);
|
||||
}
|
||||
|
||||
static int plda_msi_set_affinity(struct irq_data *irq_data,
|
||||
const struct cpumask *mask, bool force)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct irq_chip plda_msi_bottom_irq_chip = {
|
||||
.name = "PLDA MSI",
|
||||
.irq_ack = plda_msi_bottom_irq_ack,
|
||||
.irq_compose_msi_msg = plda_compose_msi_msg,
|
||||
.irq_set_affinity = plda_msi_set_affinity,
|
||||
};
|
||||
|
||||
static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
|
||||
unsigned int virq,
|
||||
unsigned int nr_irqs,
|
||||
void *args)
|
||||
{
|
||||
struct plda_pcie_rp *port = domain->host_data;
|
||||
struct plda_msi *msi = &port->msi;
|
||||
unsigned long bit;
|
||||
|
||||
mutex_lock(&msi->lock);
|
||||
bit = find_first_zero_bit(msi->used, msi->num_vectors);
|
||||
if (bit >= msi->num_vectors) {
|
||||
mutex_unlock(&msi->lock);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
set_bit(bit, msi->used);
|
||||
|
||||
irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
|
||||
domain->host_data, handle_edge_irq, NULL, NULL);
|
||||
|
||||
mutex_unlock(&msi->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void plda_irq_msi_domain_free(struct irq_domain *domain,
|
||||
unsigned int virq,
|
||||
unsigned int nr_irqs)
|
||||
{
|
||||
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
|
||||
struct plda_msi *msi = &port->msi;
|
||||
|
||||
mutex_lock(&msi->lock);
|
||||
|
||||
if (test_bit(d->hwirq, msi->used))
|
||||
__clear_bit(d->hwirq, msi->used);
|
||||
else
|
||||
dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
|
||||
|
||||
mutex_unlock(&msi->lock);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops msi_domain_ops = {
|
||||
.alloc = plda_irq_msi_domain_alloc,
|
||||
.free = plda_irq_msi_domain_free,
|
||||
};
|
||||
|
||||
static struct irq_chip plda_msi_irq_chip = {
|
||||
.name = "PLDA PCIe MSI",
|
||||
.irq_ack = irq_chip_ack_parent,
|
||||
.irq_mask = pci_msi_mask_irq,
|
||||
.irq_unmask = pci_msi_unmask_irq,
|
||||
};
|
||||
|
||||
static struct msi_domain_info plda_msi_domain_info = {
|
||||
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
|
||||
MSI_FLAG_PCI_MSIX),
|
||||
.chip = &plda_msi_irq_chip,
|
||||
};
|
||||
|
||||
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
|
||||
{
|
||||
struct device *dev = port->dev;
|
||||
struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
|
||||
struct plda_msi *msi = &port->msi;
|
||||
|
||||
mutex_init(&port->msi.lock);
|
||||
|
||||
msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
|
||||
&msi_domain_ops, port);
|
||||
if (!msi->dev_domain) {
|
||||
dev_err(dev, "failed to create IRQ domain\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
msi->msi_domain = pci_msi_create_irq_domain(fwnode,
|
||||
&plda_msi_domain_info,
|
||||
msi->dev_domain);
|
||||
if (!msi->msi_domain) {
|
||||
dev_err(dev, "failed to create MSI domain\n");
|
||||
irq_domain_remove(msi->dev_domain);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void plda_handle_intx(struct irq_desc *desc)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct device *dev = port->dev;
|
||||
void __iomem *bridge_base_addr = port->bridge_addr;
|
||||
unsigned long status;
|
||||
u32 bit;
|
||||
int ret;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
|
||||
if (status & PM_MSI_INT_INTX_MASK) {
|
||||
status &= PM_MSI_INT_INTX_MASK;
|
||||
status >>= PM_MSI_INT_INTX_SHIFT;
|
||||
for_each_set_bit(bit, &status, PCI_NUM_INTX) {
|
||||
ret = generic_handle_domain_irq(port->intx_domain, bit);
|
||||
if (ret)
|
||||
dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
|
||||
bit);
|
||||
}
|
||||
}
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void plda_ack_intx_irq(struct irq_data *data)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
void __iomem *bridge_base_addr = port->bridge_addr;
|
||||
u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
|
||||
|
||||
writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
|
||||
}
|
||||
|
||||
static void plda_mask_intx_irq(struct irq_data *data)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
void __iomem *bridge_base_addr = port->bridge_addr;
|
||||
unsigned long flags;
|
||||
u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
|
||||
u32 val;
|
||||
|
||||
raw_spin_lock_irqsave(&port->lock, flags);
|
||||
val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
|
||||
val &= ~mask;
|
||||
writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
|
||||
raw_spin_unlock_irqrestore(&port->lock, flags);
|
||||
}
|
||||
|
||||
static void plda_unmask_intx_irq(struct irq_data *data)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
void __iomem *bridge_base_addr = port->bridge_addr;
|
||||
unsigned long flags;
|
||||
u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
|
||||
u32 val;
|
||||
|
||||
raw_spin_lock_irqsave(&port->lock, flags);
|
||||
val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
|
||||
val |= mask;
|
||||
writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
|
||||
raw_spin_unlock_irqrestore(&port->lock, flags);
|
||||
}
|
||||
|
||||
static struct irq_chip plda_intx_irq_chip = {
|
||||
.name = "PLDA PCIe INTx",
|
||||
.irq_ack = plda_ack_intx_irq,
|
||||
.irq_mask = plda_mask_intx_irq,
|
||||
.irq_unmask = plda_unmask_intx_irq,
|
||||
};
|
||||
|
||||
static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
|
||||
irq_set_chip_data(irq, domain->host_data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops intx_domain_ops = {
|
||||
.map = plda_pcie_intx_map,
|
||||
};
|
||||
|
||||
static u32 plda_get_events(struct plda_pcie_rp *port)
|
||||
{
|
||||
u32 events, val, origin;
|
||||
|
||||
origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);
|
||||
|
||||
/* MSI event and sys events */
|
||||
val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
|
||||
events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);
|
||||
|
||||
/* INTx events */
|
||||
if (origin & PM_MSI_INT_INTX_MASK)
|
||||
events |= BIT(PM_MSI_INT_INTX_SHIFT);
|
||||
|
||||
/* remains are same with register */
|
||||
events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);
|
||||
|
||||
return events;
|
||||
}
|
||||
|
||||
static irqreturn_t plda_event_handler(int irq, void *dev_id)
|
||||
{
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void plda_handle_event(struct irq_desc *desc)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
|
||||
unsigned long events;
|
||||
u32 bit;
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
events = port->event_ops->get_events(port);
|
||||
|
||||
events &= port->events_bitmap;
|
||||
for_each_set_bit(bit, &events, port->num_events)
|
||||
generic_handle_domain_irq(port->event_domain, bit);
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static u32 plda_hwirq_to_mask(int hwirq)
|
||||
{
|
||||
u32 mask;
|
||||
|
||||
/* hwirq 23 - 0 are the same with register */
|
||||
if (hwirq < EVENT_PM_MSI_INT_INTX)
|
||||
mask = BIT(hwirq);
|
||||
else if (hwirq == EVENT_PM_MSI_INT_INTX)
|
||||
mask = PM_MSI_INT_INTX_MASK;
|
||||
else
|
||||
mask = BIT(hwirq + PCI_NUM_INTX - 1);
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
static void plda_ack_event_irq(struct irq_data *data)
|
||||
{
|
||||
struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
|
||||
|
||||
writel_relaxed(plda_hwirq_to_mask(data->hwirq),
|
||||
port->bridge_addr + ISTATUS_LOCAL);
|
||||
}
|
||||
|
static void plda_mask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val &= ~mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static void plda_unmask_event_irq(struct irq_data *data)
{
	struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
	u32 mask, val;

	mask = plda_hwirq_to_mask(data->hwirq);

	raw_spin_lock(&port->lock);
	val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
	val |= mask;
	writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip plda_event_irq_chip = {
	.name = "PLDA PCIe EVENT",
	.irq_ack = plda_ack_event_irq,
	.irq_mask = plda_mask_event_irq,
	.irq_unmask = plda_unmask_event_irq,
};

static const struct plda_event_ops plda_event_ops = {
	.get_events = plda_get_events,
};

static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	struct plda_pcie_rp *port = (void *)domain->host_data;

	irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops plda_event_domain_ops = {
	.map = plda_pcie_event_map,
};

static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "failed to find PCIe Intc node\n");
		return -EINVAL;
	}

	port->event_domain = irq_domain_add_linear(pcie_intc_node,
						   port->num_events,
						   &plda_event_domain_ops,
						   port);
	if (!port->event_domain) {
		dev_err(dev, "failed to get event domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		of_node_put(pcie_intc_node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return plda_allocate_msi_domains(port);
}

int plda_init_interrupts(struct platform_device *pdev,
			 struct plda_pcie_rp *port,
			 const struct plda_event *event)
{
	struct device *dev = &pdev->dev;
	int event_irq, ret;
	u32 i;

	if (!port->event_ops)
		port->event_ops = &plda_event_ops;

	if (!port->event_irq_chip)
		port->event_irq_chip = &plda_event_irq_chip;

	ret = plda_pcie_init_irq_domains(port);
	if (ret) {
		dev_err(dev, "failed creating IRQ domains\n");
		return ret;
	}

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return -ENODEV;

	for_each_set_bit(i, &port->events_bitmap, port->num_events) {
		event_irq = irq_create_mapping(port->event_domain, i);
		if (!event_irq) {
			dev_err(dev, "failed to map hwirq %d\n", i);
			return -ENXIO;
		}

		if (event->request_event_irq)
			ret = event->request_event_irq(port, event_irq, i);
		else
			ret = devm_request_irq(dev, event_irq,
					       plda_event_handler,
					       0, NULL, port);

		if (ret) {
			dev_err(dev, "failed to request IRQ %d\n", event_irq);
			return ret;
		}
	}

	port->intx_irq = irq_create_mapping(port->event_domain,
					    event->intx_event);
	if (!port->intx_irq) {
		dev_err(dev, "failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);

	port->msi_irq = irq_create_mapping(port->event_domain,
					   event->msi_event);
	if (!port->msi_irq)
		return -ENXIO;

	/* Plug the MSI chained handler */
	irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);

	return 0;
}
EXPORT_SYMBOL_GPL(plda_init_interrupts);

void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
			    phys_addr_t axi_addr, phys_addr_t pci_addr,
			    size_t size)
{
	u32 atr_sz = ilog2(size) - 1;
	u32 val;

	if (index == 0)
		val = PCIE_CONFIG_INTERFACE;
	else
		val = PCIE_TX_RX_INTERFACE;

	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_PARAM);

	val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
	      ATR_IMPL_ENABLE;
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRCADDR_PARAM);

	val = upper_32_bits(axi_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_SRC_ADDR);

	val = lower_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_LSB);

	val = upper_32_bits(pci_addr);
	writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
	       ATR0_AXI4_SLV0_TRSL_ADDR_UDW);

	val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
	writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
	writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
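Editor's note on the size encoding above: plda_pcie_setup_window() does not program the window length as a byte count; it packs ilog2(size) - 1 into the ATR source-address register next to ATR_IMPL_ENABLE and the low AXI address bits, so the field only describes power-of-two windows (ilog2() rounds down otherwise). A stand-alone sketch of that encoding follows; the 512 MiB figure is only an illustration, not taken from the patch.

#include <stdio.h>

/* Mirrors the kernel's "atr_sz = ilog2(size) - 1" for a power-of-two size. */
static unsigned int atr_size_field(unsigned long long size)
{
	unsigned int log2sz = 0;

	while (size >>= 1)	/* count the highest set bit, i.e. ilog2() */
		log2sz++;

	return log2sz - 1;	/* the hardware wants log2(size) - 1 */
}

int main(void)
{
	/* e.g. a 512 MiB translation window -> field value 28 */
	printf("ATR size field: %u\n", atr_size_field(0x20000000ULL));
	return 0;
}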

int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
			   struct plda_pcie_rp *port)
{
	void __iomem *bridge_base_addr = port->bridge_addr;
	struct resource_entry *entry;
	u64 pci_addr;
	u32 index = 1;

	resource_list_for_each_entry(entry, &bridge->windows) {
		if (resource_type(entry->res) == IORESOURCE_MEM) {
			pci_addr = entry->res->start - entry->offset;
			plda_pcie_setup_window(bridge_base_addr, index,
					       entry->res->start, pci_addr,
					       resource_size(entry->res));
			index++;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);

static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
	irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);

	irq_domain_remove(pcie->msi.msi_domain);
	irq_domain_remove(pcie->msi.dev_domain);

	irq_domain_remove(pcie->intx_domain);
	irq_domain_remove(pcie->event_domain);
}

int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
			const struct plda_event *plda_event)
{
	struct device *dev = port->dev;
	struct pci_host_bridge *bridge;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *cfg_res;
	int ret;

	pdev = to_platform_device(dev);

	port->bridge_addr =
		devm_platform_ioremap_resource_byname(pdev, "apb");

	if (IS_ERR(port->bridge_addr))
		return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
				     "failed to map reg memory\n");

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	if (!cfg_res)
		return dev_err_probe(dev, -ENODEV,
				     "failed to get config memory\n");

	port->config_base = devm_ioremap_resource(dev, cfg_res);
	if (IS_ERR(port->config_base))
		return dev_err_probe(dev, PTR_ERR(port->config_base),
				     "failed to map config memory\n");

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return dev_err_probe(dev, -ENOMEM,
				     "failed to alloc bridge\n");

	if (port->host_ops && port->host_ops->host_init) {
		ret = port->host_ops->host_init(port);
		if (ret)
			return ret;
	}

	port->bridge = bridge;
	plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
			       resource_size(cfg_res));
	plda_pcie_setup_iomems(bridge, port);
	plda_set_default_msi(&port->msi);
	ret = plda_init_interrupts(pdev, port, plda_event);
	if (ret)
		goto err_host;

	/* Set default bus ops */
	bridge->ops = ops;
	bridge->sysdata = port;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		dev_err_probe(dev, ret, "failed to probe pci host\n");
		goto err_probe;
	}

	return ret;

err_probe:
	plda_pcie_irq_domain_deinit(port);
err_host:
	if (port->host_ops && port->host_ops->host_deinit)
		port->host_ops->host_deinit(port);

	return ret;
}
EXPORT_SYMBOL_GPL(plda_pcie_host_init);
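For readers skimming the diff: plda_pcie_host_init() expects the caller to have set port->dev plus num_events and events_bitmap, and optionally host_ops, event_ops and event_irq_chip; it then maps the "apb" and "cfg" resources, programs the ATR windows and hands the bridge to pci_host_probe(). The StarFive driver later in this commit is the in-tree consumer. The sketch below is a hypothetical minimal glue driver, with every name outside the plda_* helpers and generic PCI/OF APIs invented purely for illustration.

/* Hypothetical vendor glue, not part of this commit. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include "pcie-plda.h"

static const struct plda_event demo_pcie_event = {
	.intx_event = EVENT_PM_MSI_INT_INTX,
	.msi_event = EVENT_PM_MSI_INT_MSI,
};

static struct pci_ops demo_pcie_ops = {
	.map_bus = plda_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static int demo_pcie_probe(struct platform_device *pdev)
{
	struct plda_pcie_rp *port;

	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = &pdev->dev;
	port->num_events = PLDA_MAX_EVENT_NUM;
	/* Request every event; a real driver would mask vendor-specific bits. */
	port->events_bitmap = GENMASK(PLDA_MAX_EVENT_NUM - 1, 0);

	/* Maps "apb"/"cfg", sets up windows and IRQs, then probes the bus. */
	return plda_pcie_host_init(port, &demo_pcie_ops, &demo_pcie_event);
}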

void plda_pcie_host_deinit(struct plda_pcie_rp *port)
{
	pci_stop_root_bus(port->bridge->bus);
	pci_remove_root_bus(port->bridge->bus);

	plda_pcie_irq_domain_deinit(port);

	if (port->host_ops && port->host_ops->host_deinit)
		port->host_ops->host_deinit(port);
}
EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
drivers/pci/controller/plda/pcie-plda.h (new file, 273 lines)
@ -0,0 +1,273 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * PLDA PCIe host controller driver
 */

#ifndef _PCIE_PLDA_H
#define _PCIE_PLDA_H

/* Number of MSI IRQs */
#define PLDA_MAX_NUM_MSI_IRQS 32

/* PCIe Bridge Phy Regs */
#define GEN_SETTINGS 0x80
#define RP_ENABLE 1
#define PCIE_PCI_IDS_DW1 0x9c
#define IDS_CLASS_CODE_SHIFT 16
#define REVISION_ID_MASK GENMASK(7, 0)
#define CLASS_CODE_ID_MASK GENMASK(31, 8)
#define PCIE_PCI_IRQ_DW0 0xa8
#define MSIX_CAP_MASK BIT(31)
#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
#define NUM_MSI_MSGS_SHIFT 4
#define PCI_MISC 0xb4
#define PHY_FUNCTION_DIS BIT(15)
#define PCIE_WINROM 0xfc
#define PREF_MEM_WIN_64_SUPPORT BIT(3)

#define IMASK_LOCAL 0x180
#define DMA_END_ENGINE_0_MASK 0x00000000u
#define DMA_END_ENGINE_0_SHIFT 0
#define DMA_END_ENGINE_1_MASK 0x00000000u
#define DMA_END_ENGINE_1_SHIFT 1
#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
#define DMA_ERROR_ENGINE_0_SHIFT 8
#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
#define DMA_ERROR_ENGINE_1_SHIFT 9
#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
#define A_ATR_EVT_POST_ERR_SHIFT 16
#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
#define A_ATR_EVT_FETCH_ERR_SHIFT 17
#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
#define A_ATR_EVT_DOORBELL_SHIFT 19
#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
#define P_ATR_EVT_POST_ERR_SHIFT 20
#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
#define P_ATR_EVT_FETCH_ERR_SHIFT 21
#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
#define P_ATR_EVT_DOORBELL_SHIFT 23
#define PM_MSI_INT_INTA_MASK 0x01000000u
#define PM_MSI_INT_INTA_SHIFT 24
#define PM_MSI_INT_INTB_MASK 0x02000000u
#define PM_MSI_INT_INTB_SHIFT 25
#define PM_MSI_INT_INTC_MASK 0x04000000u
#define PM_MSI_INT_INTC_SHIFT 26
#define PM_MSI_INT_INTD_MASK 0x08000000u
#define PM_MSI_INT_INTD_SHIFT 27
#define PM_MSI_INT_INTX_MASK 0x0f000000u
#define PM_MSI_INT_INTX_SHIFT 24
#define PM_MSI_INT_MSI_MASK 0x10000000u
#define PM_MSI_INT_MSI_SHIFT 28
#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
#define PM_MSI_INT_AER_EVT_SHIFT 29
#define PM_MSI_INT_EVENTS_MASK 0x40000000u
#define PM_MSI_INT_EVENTS_SHIFT 30
#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
#define PM_MSI_INT_SYS_ERR_SHIFT 31
#define SYS_AND_MSI_MASK GENMASK(31, 28)
#define NUM_LOCAL_EVENTS 15
#define ISTATUS_LOCAL 0x184
#define IMASK_HOST 0x188
#define ISTATUS_HOST 0x18c
#define IMSI_ADDR 0x190
#define ISTATUS_MSI 0x194
#define PMSG_SUPPORT_RX 0x3f0
#define PMSG_LTR_SUPPORT BIT(2)

/* PCIe Master table init defines */
#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
#define ATR0_PCIE_ATR_SIZE 0x25
#define ATR0_PCIE_ATR_SIZE_SHIFT 1
#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u

/* PCIe AXI slave table init defines */
#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
#define ATR_SIZE_SHIFT 1
#define ATR_IMPL_ENABLE 1
#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
#define PCIE_TX_RX_INTERFACE 0x00000000u
#define PCIE_CONFIG_INTERFACE 0x00000001u

#define CONFIG_SPACE_ADDR_OFFSET 0x1000u

#define ATR_ENTRY_SIZE 32

enum plda_int_event {
	PLDA_AXI_POST_ERR,
	PLDA_AXI_FETCH_ERR,
	PLDA_AXI_DISCARD_ERR,
	PLDA_AXI_DOORBELL,
	PLDA_PCIE_POST_ERR,
	PLDA_PCIE_FETCH_ERR,
	PLDA_PCIE_DISCARD_ERR,
	PLDA_PCIE_DOORBELL,
	PLDA_INTX,
	PLDA_MSI,
	PLDA_AER_EVENT,
	PLDA_MISC_EVENTS,
	PLDA_SYS_ERR,
	PLDA_INT_EVENT_NUM
};

#define PLDA_NUM_DMA_EVENTS 16

#define EVENT_PM_MSI_INT_INTX (PLDA_NUM_DMA_EVENTS + PLDA_INTX)
#define EVENT_PM_MSI_INT_MSI (PLDA_NUM_DMA_EVENTS + PLDA_MSI)
#define PLDA_MAX_EVENT_NUM (PLDA_NUM_DMA_EVENTS + PLDA_INT_EVENT_NUM)
/*
 * PLDA interrupt register
 *
 * 31         27     23              15           7          0
 * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
 * |12|11|10|9| intx |7|6|5|4|3|2|1|0| DMA error | DMA end   |
 * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
 * event (register bit)
 * 0-7  (0-7)   DMA interrupt end : reserved for vendor implementation
 * 8-15 (8-15)  DMA error : reserved for vendor implementation
 * 16   (16)    AXI post error (PLDA_AXI_POST_ERR)
 * 17   (17)    AXI fetch error (PLDA_AXI_FETCH_ERR)
 * 18   (18)    AXI discard error (PLDA_AXI_DISCARD_ERR)
 * 19   (19)    AXI doorbell (PLDA_AXI_DOORBELL)
 * 20   (20)    PCIe post error (PLDA_PCIE_POST_ERR)
 * 21   (21)    PCIe fetch error (PLDA_PCIE_FETCH_ERR)
 * 22   (22)    PCIe discard error (PLDA_PCIE_DISCARD_ERR)
 * 23   (23)    PCIe doorbell (PLDA_PCIE_DOORBELL)
 * 24   (27-24) INTx interrupts (PLDA_INTX)
 * 25   (28)    MSI interrupt (PLDA_MSI)
 * 26   (29)    AER event (PLDA_AER_EVENT)
 * 27   (30)    PM/LTR/Hotplug (PLDA_MISC_EVENTS)
 * 28   (31)    System error (PLDA_SYS_ERR)
 */

struct plda_pcie_rp;

struct plda_event_ops {
	u32 (*get_events)(struct plda_pcie_rp *pcie);
};

struct plda_pcie_host_ops {
	int (*host_init)(struct plda_pcie_rp *pcie);
	void (*host_deinit)(struct plda_pcie_rp *pcie);
};

struct plda_msi {
	struct mutex lock;	/* Protect used bitmap */
	struct irq_domain *msi_domain;
	struct irq_domain *dev_domain;
	u32 num_vectors;
	u64 vector_phy;
	DECLARE_BITMAP(used, PLDA_MAX_NUM_MSI_IRQS);
};

struct plda_pcie_rp {
	struct device *dev;
	struct pci_host_bridge *bridge;
	struct irq_domain *intx_domain;
	struct irq_domain *event_domain;
	raw_spinlock_t lock;
	struct plda_msi msi;
	const struct plda_event_ops *event_ops;
	const struct irq_chip *event_irq_chip;
	const struct plda_pcie_host_ops *host_ops;
	void __iomem *bridge_addr;
	void __iomem *config_base;
	unsigned long events_bitmap;
	int irq;
	int msi_irq;
	int intx_irq;
	int num_events;
};

struct plda_event {
	int (*request_event_irq)(struct plda_pcie_rp *pcie,
				 int event_irq, int event);
	int intx_event;
	int msi_event;
};

void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				int where);
int plda_init_interrupts(struct platform_device *pdev,
			 struct plda_pcie_rp *port,
			 const struct plda_event *event);
void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
			    phys_addr_t axi_addr, phys_addr_t pci_addr,
			    size_t size);
int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
			   struct plda_pcie_rp *port);
int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
			const struct plda_event *plda_event);
void plda_pcie_host_deinit(struct plda_pcie_rp *pcie);

static inline void plda_set_default_msi(struct plda_msi *msi)
{
	msi->vector_phy = IMSI_ADDR;
	msi->num_vectors = PLDA_MAX_NUM_MSI_IRQS;
}

static inline void plda_pcie_enable_root_port(struct plda_pcie_rp *plda)
{
	u32 value;

	value = readl_relaxed(plda->bridge_addr + GEN_SETTINGS);
	value |= RP_ENABLE;
	writel_relaxed(value, plda->bridge_addr + GEN_SETTINGS);
}

static inline void plda_pcie_set_standard_class(struct plda_pcie_rp *plda)
{
	u32 value;

	/* set the class code and preserve the revision ID */
	value = readl_relaxed(plda->bridge_addr + PCIE_PCI_IDS_DW1);
	value &= REVISION_ID_MASK;
	value |= (PCI_CLASS_BRIDGE_PCI << IDS_CLASS_CODE_SHIFT);
	writel_relaxed(value, plda->bridge_addr + PCIE_PCI_IDS_DW1);
}

static inline void plda_pcie_set_pref_win_64bit(struct plda_pcie_rp *plda)
{
	u32 value;

	value = readl_relaxed(plda->bridge_addr + PCIE_WINROM);
	value |= PREF_MEM_WIN_64_SUPPORT;
	writel_relaxed(value, plda->bridge_addr + PCIE_WINROM);
}

static inline void plda_pcie_disable_ltr(struct plda_pcie_rp *plda)
{
	u32 value;

	value = readl_relaxed(plda->bridge_addr + PMSG_SUPPORT_RX);
	value &= ~PMSG_LTR_SUPPORT;
	writel_relaxed(value, plda->bridge_addr + PMSG_SUPPORT_RX);
}

static inline void plda_pcie_disable_func(struct plda_pcie_rp *plda)
{
	u32 value;

	value = readl_relaxed(plda->bridge_addr + PCI_MISC);
	value |= PHY_FUNCTION_DIS;
	writel_relaxed(value, plda->bridge_addr + PCI_MISC);
}

static inline void plda_pcie_write_rc_bar(struct plda_pcie_rp *plda, u64 val)
{
	void __iomem *addr = plda->bridge_addr + CONFIG_SPACE_ADDR_OFFSET;

	writel_relaxed(lower_32_bits(val), addr + PCI_BASE_ADDRESS_0);
	writel_relaxed(upper_32_bits(val), addr + PCI_BASE_ADDRESS_1);
}
#endif /* _PCIE_PLDA_H */
drivers/pci/controller/plda/pcie-starfive.c (new file, 488 lines)
@ -0,0 +1,488 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for the StarFive JH7110 SoC.
 *
 * Copyright (C) 2023 StarFive Technology Co., Ltd.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include "../../pci.h"

#include "pcie-plda.h"

#define PCIE_FUNC_NUM 4

/* system control */
#define STG_SYSCON_PCIE0_BASE 0x48
#define STG_SYSCON_PCIE1_BASE 0x1f8

#define STG_SYSCON_AR_OFFSET 0x78
#define STG_SYSCON_AXI4_SLVL_AR_MASK GENMASK(22, 8)
#define STG_SYSCON_AXI4_SLVL_PHY_AR(x) FIELD_PREP(GENMASK(20, 17), x)
#define STG_SYSCON_AW_OFFSET 0x7c
#define STG_SYSCON_AXI4_SLVL_AW_MASK GENMASK(14, 0)
#define STG_SYSCON_AXI4_SLVL_PHY_AW(x) FIELD_PREP(GENMASK(12, 9), x)
#define STG_SYSCON_CLKREQ BIT(22)
#define STG_SYSCON_CKREF_SRC_MASK GENMASK(19, 18)
#define STG_SYSCON_RP_NEP_OFFSET 0xe8
#define STG_SYSCON_K_RP_NEP BIT(8)
#define STG_SYSCON_LNKSTA_OFFSET 0x170
#define DATA_LINK_ACTIVE BIT(5)

/* Parameters for waiting for link up */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000

struct starfive_jh7110_pcie {
	struct plda_pcie_rp plda;
	struct reset_control *resets;
	struct clk_bulk_data *clks;
	struct regmap *reg_syscon;
	struct gpio_desc *power_gpio;
	struct gpio_desc *reset_gpio;
	struct phy *phy;

	unsigned int stg_pcie_base;
	int num_clks;
};

/*
 * JH7110 PCIe port BAR0/1 can be configured as 64-bit prefetchable memory
 * space. PCIe read and write requests targeting BAR0/1 are routed to the
 * "Bridge Configuration space" described in the PLDA IP datasheet, which
 * contains the bridge's internal registers (interrupt, DMA, ATU and so on).
 * The JH7110 reaches the Bridge Configuration space through its local bus,
 * and the bridge internal registers must not be reachable by DMA from EP
 * devices. The BARs are therefore treated as unimplemented and hidden here.
 */
static bool starfive_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
				      int offset)
{
	if (pci_is_root_bus(bus) && !devfn &&
	    (offset == PCI_BASE_ADDRESS_0 || offset == PCI_BASE_ADDRESS_1))
		return true;

	return false;
}

static int starfive_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				      int where, int size, u32 value)
{
	if (starfive_pcie_hide_rc_bar(bus, devfn, where))
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static int starfive_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				     int where, int size, u32 *value)
{
	if (starfive_pcie_hide_rc_bar(bus, devfn, where)) {
		*value = 0;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int starfive_pcie_parse_dt(struct starfive_jh7110_pcie *pcie,
				  struct device *dev)
{
	int domain_nr;

	pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
	if (pcie->num_clks < 0)
		return dev_err_probe(dev, pcie->num_clks,
				     "failed to get pcie clocks\n");

	pcie->resets = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(pcie->resets))
		return dev_err_probe(dev, PTR_ERR(pcie->resets),
				     "failed to get pcie resets");

	pcie->reg_syscon =
		syscon_regmap_lookup_by_phandle(dev->of_node,
						"starfive,stg-syscon");

	if (IS_ERR(pcie->reg_syscon))
		return dev_err_probe(dev, PTR_ERR(pcie->reg_syscon),
				     "failed to parse starfive,stg-syscon\n");

	pcie->phy = devm_phy_optional_get(dev, NULL);
	if (IS_ERR(pcie->phy))
		return dev_err_probe(dev, PTR_ERR(pcie->phy),
				     "failed to get pcie phy\n");

	/*
	 * The PCIe domain numbers are set to be static in JH7110 DTS.
	 * As the STG system controller defines different bases in PCIe RP0 &
	 * RP1, we use them to identify which controller is doing the hardware
	 * initialization.
	 */
	domain_nr = of_get_pci_domain_nr(dev->of_node);

	if (domain_nr < 0 || domain_nr > 1)
		return dev_err_probe(dev, -ENODEV,
				     "failed to get valid pcie domain\n");

	if (domain_nr == 0)
		pcie->stg_pcie_base = STG_SYSCON_PCIE0_BASE;
	else
		pcie->stg_pcie_base = STG_SYSCON_PCIE1_BASE;

	pcie->reset_gpio = devm_gpiod_get_optional(dev, "perst",
						   GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
				     "failed to get perst-gpio\n");

	pcie->power_gpio = devm_gpiod_get_optional(dev, "enable",
						   GPIOD_OUT_LOW);
	if (IS_ERR(pcie->power_gpio))
		return dev_err_probe(dev, PTR_ERR(pcie->power_gpio),
				     "failed to get power-gpio\n");

	return 0;
}

static struct pci_ops starfive_pcie_ops = {
	.map_bus = plda_pcie_map_bus,
	.read = starfive_pcie_config_read,
	.write = starfive_pcie_config_write,
};

static int starfive_pcie_clk_rst_init(struct starfive_jh7110_pcie *pcie)
{
	struct device *dev = pcie->plda.dev;
	int ret;

	ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (ret)
		return dev_err_probe(dev, ret, "failed to enable clocks\n");

	ret = reset_control_deassert(pcie->resets);
	if (ret) {
		clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
		dev_err_probe(dev, ret, "failed to deassert resets\n");
	}

	return ret;
}

static void starfive_pcie_clk_rst_deinit(struct starfive_jh7110_pcie *pcie)
{
	reset_control_assert(pcie->resets);
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
}

static bool starfive_pcie_link_up(struct plda_pcie_rp *plda)
{
	struct starfive_jh7110_pcie *pcie =
		container_of(plda, struct starfive_jh7110_pcie, plda);
	int ret;
	u32 stg_reg_val;

	ret = regmap_read(pcie->reg_syscon,
			  pcie->stg_pcie_base + STG_SYSCON_LNKSTA_OFFSET,
			  &stg_reg_val);
	if (ret) {
		dev_err(pcie->plda.dev, "failed to read link status\n");
		return false;
	}

	return !!(stg_reg_val & DATA_LINK_ACTIVE);
}

static int starfive_pcie_host_wait_for_link(struct starfive_jh7110_pcie *pcie)
{
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (starfive_pcie_link_up(&pcie->plda)) {
			dev_info(pcie->plda.dev, "port link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}

static int starfive_pcie_enable_phy(struct device *dev,
				    struct starfive_jh7110_pcie *pcie)
{
	int ret;

	if (!pcie->phy)
		return 0;

	ret = phy_init(pcie->phy);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to initialize pcie phy\n");

	ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
	if (ret) {
		dev_err_probe(dev, ret, "failed to set pcie mode\n");
		goto err_phy_on;
	}

	ret = phy_power_on(pcie->phy);
	if (ret) {
		dev_err_probe(dev, ret, "failed to power on pcie phy\n");
		goto err_phy_on;
	}

	return 0;

err_phy_on:
	phy_exit(pcie->phy);
	return ret;
}

static void starfive_pcie_disable_phy(struct starfive_jh7110_pcie *pcie)
{
	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
}

static void starfive_pcie_host_deinit(struct plda_pcie_rp *plda)
{
	struct starfive_jh7110_pcie *pcie =
		container_of(plda, struct starfive_jh7110_pcie, plda);

	starfive_pcie_clk_rst_deinit(pcie);
	if (pcie->power_gpio)
		gpiod_set_value_cansleep(pcie->power_gpio, 0);
	starfive_pcie_disable_phy(pcie);
}

static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
{
	struct starfive_jh7110_pcie *pcie =
		container_of(plda, struct starfive_jh7110_pcie, plda);
	struct device *dev = plda->dev;
	int ret;
	int i;

	ret = starfive_pcie_enable_phy(dev, pcie);
	if (ret)
		return ret;

	regmap_update_bits(pcie->reg_syscon,
			   pcie->stg_pcie_base + STG_SYSCON_RP_NEP_OFFSET,
			   STG_SYSCON_K_RP_NEP, STG_SYSCON_K_RP_NEP);

	regmap_update_bits(pcie->reg_syscon,
			   pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
			   STG_SYSCON_CKREF_SRC_MASK,
			   FIELD_PREP(STG_SYSCON_CKREF_SRC_MASK, 2));

	regmap_update_bits(pcie->reg_syscon,
			   pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
			   STG_SYSCON_CLKREQ, STG_SYSCON_CLKREQ);

	ret = starfive_pcie_clk_rst_init(pcie);
	if (ret)
		return ret;

	if (pcie->power_gpio)
		gpiod_set_value_cansleep(pcie->power_gpio, 1);

	if (pcie->reset_gpio)
		gpiod_set_value_cansleep(pcie->reset_gpio, 1);

	/* Disable physical functions except #0 */
	for (i = 1; i < PCIE_FUNC_NUM; i++) {
		regmap_update_bits(pcie->reg_syscon,
				   pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
				   STG_SYSCON_AXI4_SLVL_AR_MASK,
				   STG_SYSCON_AXI4_SLVL_PHY_AR(i));

		regmap_update_bits(pcie->reg_syscon,
				   pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
				   STG_SYSCON_AXI4_SLVL_AW_MASK,
				   STG_SYSCON_AXI4_SLVL_PHY_AW(i));

		plda_pcie_disable_func(plda);
	}

	regmap_update_bits(pcie->reg_syscon,
			   pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
			   STG_SYSCON_AXI4_SLVL_AR_MASK, 0);
	regmap_update_bits(pcie->reg_syscon,
			   pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
			   STG_SYSCON_AXI4_SLVL_AW_MASK, 0);

	plda_pcie_enable_root_port(plda);
	plda_pcie_write_rc_bar(plda, 0);

	/* PCIe PCI Standard Configuration Identification Settings. */
	plda_pcie_set_standard_class(plda);

	/*
	 * LTR message reception is enabled by default by the "PCIe Message
	 * Reception" register, but the forwarding ID and address it would
	 * use are left uninitialized. If LTR message forwarding is neither
	 * disabled here nor given a legal forwarding address, the kernel
	 * hangs, so disable it before the feature is used.
	 */
	plda_pcie_disable_ltr(plda);

	/*
	 * Enable 64-bit addressing for the prefetchable memory window on the
	 * JH7110. The 64-bit prefetchable address translations configured in
	 * the ATU only take effect once this register bit is set.
	 */
	plda_pcie_set_pref_win_64bit(plda);

	/*
	 * Ensure that PERST has been asserted for at least 100 ms,
	 * the sleep value is T_PVPERL from PCIe CEM spec r2.0 (Table 2-4)
	 */
	msleep(100);
	if (pcie->reset_gpio)
		gpiod_set_value_cansleep(pcie->reset_gpio, 0);

	/*
	 * With a Downstream Port (<=5GT/s), software must wait a minimum
	 * of 100ms following exit from a conventional reset before
	 * sending a configuration request to the device.
	 */
	msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS);

	if (starfive_pcie_host_wait_for_link(pcie))
		dev_info(dev, "port link down\n");

	return 0;
}

static const struct plda_pcie_host_ops sf_host_ops = {
	.host_init = starfive_pcie_host_init,
	.host_deinit = starfive_pcie_host_deinit,
};

static const struct plda_event stf_pcie_event = {
	.intx_event = EVENT_PM_MSI_INT_INTX,
	.msi_event = EVENT_PM_MSI_INT_MSI
};

static int starfive_pcie_probe(struct platform_device *pdev)
{
	struct starfive_jh7110_pcie *pcie;
	struct device *dev = &pdev->dev;
	struct plda_pcie_rp *plda;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	plda = &pcie->plda;
	plda->dev = dev;

	ret = starfive_pcie_parse_dt(pcie, dev);
	if (ret)
		return ret;

	plda->host_ops = &sf_host_ops;
	plda->num_events = PLDA_MAX_EVENT_NUM;
	/* mask doorbell event */
	plda->events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0)
			      & ~BIT(PLDA_AXI_DOORBELL)
			      & ~BIT(PLDA_PCIE_DOORBELL);
	plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS;
	ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops,
				  &stf_pcie_event);
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);
	platform_set_drvdata(pdev, pcie);

	return 0;
}
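A quick cross-check of the bitmap built in the probe above (the arithmetic is recomputed here by the editor, not taken from the commit): GENMASK(PLDA_INT_EVENT_NUM - 1, 0) covers the 13 generic events, the two doorbell bits are cleared, and the result is shifted past the 16 vendor DMA slots, so the cleared positions land at event numbers 19 and 23, the two doorbell events. A stand-alone sketch of the same computation:

#include <stdio.h>

#define PLDA_NUM_DMA_EVENTS	16	/* values mirrored from pcie-plda.h */
#define PLDA_AXI_DOORBELL	3
#define PLDA_PCIE_DOORBELL	7
#define PLDA_INT_EVENT_NUM	13

int main(void)
{
	unsigned long bitmap = ((1UL << PLDA_INT_EVENT_NUM) - 1)	/* GENMASK(12, 0) */
			       & ~(1UL << PLDA_AXI_DOORBELL)
			       & ~(1UL << PLDA_PCIE_DOORBELL);

	bitmap <<= PLDA_NUM_DMA_EVENTS;

	/* Prints 0x1f770000: every generic event except numbers 19 and 23. */
	printf("events_bitmap = 0x%lx\n", bitmap);
	return 0;
}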

static void starfive_pcie_remove(struct platform_device *pdev)
{
	struct starfive_jh7110_pcie *pcie = platform_get_drvdata(pdev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	plda_pcie_host_deinit(&pcie->plda);
	platform_set_drvdata(pdev, NULL);
}

static int starfive_pcie_suspend_noirq(struct device *dev)
{
	struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
	starfive_pcie_disable_phy(pcie);

	return 0;
}

static int starfive_pcie_resume_noirq(struct device *dev)
{
	struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = starfive_pcie_enable_phy(dev, pcie);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (ret) {
		dev_err(dev, "failed to enable clocks\n");
		starfive_pcie_disable_phy(pcie);
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops starfive_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(starfive_pcie_suspend_noirq,
				  starfive_pcie_resume_noirq)
};

static const struct of_device_id starfive_pcie_of_match[] = {
	{ .compatible = "starfive,jh7110-pcie", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, starfive_pcie_of_match);

static struct platform_driver starfive_pcie_driver = {
	.driver = {
		.name = "pcie-starfive",
		.of_match_table = of_match_ptr(starfive_pcie_of_match),
		.pm = pm_sleep_ptr(&starfive_pcie_pm_ops),
	},
	.probe = starfive_pcie_probe,
	.remove_new = starfive_pcie_remove,
};
module_platform_driver(starfive_pcie_driver);

MODULE_DESCRIPTION("StarFive JH7110 PCIe host driver");
MODULE_LICENSE("GPL v2");

drivers/pci/pci.h
@ -22,6 +22,21 @@
 */
#define PCIE_PME_TO_L2_TIMEOUT_US 10000

/*
 * PCIe r6.0, sec 6.6.1 <Conventional Reset>
 *
 * - "With a Downstream Port that does not support Link speeds greater
 *    than 5.0 GT/s, software must wait a minimum of 100 ms following exit
 *    from a Conventional Reset before sending a Configuration Request to
 *    the device immediately below that Port."
 *
 * - "With a Downstream Port that supports Link speeds greater than
 *    5.0 GT/s, software must wait a minimum of 100 ms after Link training
 *    completes before sending a Configuration Request to the device
 *    immediately below that Port."
 */
#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100

/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
#define PCIE_MSG_TYPE_R_RC 0
#define PCIE_MSG_TYPE_R_ADDR 1