mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git, synced 2024-12-28 00:32:00 +00:00

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git

commit ecc935f512
@@ -17,6 +17,7 @@ properties:
     enum:
       - xlnx,versal-cpm-host-1.00
       - xlnx,versal-cpm5-host
+      - xlnx,versal-cpm5-host1

   reg:
     items:
@@ -1987,7 +1987,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
     if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
         /* legacy intx interrupts */
-        pci_intx(pdev, 1);
+        pcim_intx(pdev, 1);
     }
     hpriv->irq = pci_irq_vector(pdev, 0);
 
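Review note: the pci_intx() -> pcim_intx() conversions in this and the following libata hunks all follow the same rule — these drivers enable their device with pcim_enable_device(), so INTx state should go through the explicitly devres-managed helper rather than the hybrid pci_intx(). A minimal sketch of the resulting pattern (driver name and structure are placeholders, not from this commit):

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        int rc;

        /* Managed enable: the device is disabled again on driver detach. */
        rc = pcim_enable_device(pdev);
        if (rc)
            return rc;

        /*
         * Managed INTx enable: devres records the original state and
         * restores it automatically when the driver unbinds.
         */
        rc = pcim_intx(pdev, 1);
        if (rc)
            return rc;

        return 0;
    }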
@@ -1725,7 +1725,7 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
      * message-signalled interrupts currently).
      */
     if (port_flags & PIIX_FLAG_CHECKINTR)
-        pci_intx(pdev, 1);
+        pcim_intx(pdev, 1);
 
     if (piix_check_450nx_errata(pdev)) {
         /* This writes into the master table but it does not
@@ -340,7 +340,7 @@ static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         return rc;
     host->private_data = hpriv;
 
-    pci_intx(pdev, 1);
+    pcim_intx(pdev, 1);
 
     host->flags |= ATA_HOST_PARALLEL_SCAN;
 
@@ -1317,7 +1317,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
     if (sata_sil24_msi && !pci_enable_msi(pdev)) {
         dev_info(&pdev->dev, "Using MSI\n");
-        pci_intx(pdev, 0);
+        pcim_intx(pdev, 0);
     }
 
     pci_set_master(pdev);
@@ -290,7 +290,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
     }
 
     pci_set_master(pdev);
-    pci_intx(pdev, 1);
+    pcim_intx(pdev, 1);
     return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
                              IRQF_SHARED, &sis_sht);
 }
@@ -221,7 +221,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
     }
 
     pci_set_master(pdev);
-    pci_intx(pdev, 1);
+    pcim_intx(pdev, 1);
     return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
                              IRQF_SHARED, &uli_sht);
 }
@@ -384,7 +384,7 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
     pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
 
     if (pci_enable_msi(pdev) == 0)
-        pci_intx(pdev, 0);
+        pcim_intx(pdev, 0);
 
     /*
      * Config offset 0x98 is "Extended Control and Status Register 0"
@@ -478,7 +478,6 @@ static int en7581_pci_enable(struct clk_hw *hw)
           REG_PCI_CONTROL_PERSTOUT;
     val = readl(np_base + REG_PCI_CONTROL);
     writel(val | mask, np_base + REG_PCI_CONTROL);
-    msleep(250);
 
     return 0;
 }
@@ -122,7 +122,7 @@ int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
 {
     int rc;
 
-    pci_intx(privdata->pdev, true);
+    pcim_intx(privdata->pdev, true);
 
     rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
                           amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
@@ -248,7 +248,7 @@ static void amd_mp2_pci_remove(void *privdata)
     struct amd_mp2_dev *mp2 = privdata;
     amd_sfh_hid_client_deinit(privdata);
     mp2->mp2_ops->stop_all(mp2);
-    pci_intx(mp2->pdev, false);
+    pcim_intx(mp2->pdev, false);
     amd_sfh_clear_intr(mp2);
 }
 
@@ -289,7 +289,7 @@ static void amd_mp2_pci_remove(void *privdata)
     sfh_deinit_emp2();
     amd_sfh_hid_client_deinit(privdata);
     mp2->mp2_ops->stop_all(mp2);
-    pci_intx(mp2->pdev, false);
+    pcim_intx(mp2->pdev, false);
     amd_sfh_clear_intr(mp2);
 }
 
@@ -1057,7 +1057,7 @@ static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
     }
 
     pcr->irq = pcr->pci->irq;
-    pci_intx(pcr->pci, !pcr->msi_en);
+    pci_intx_unmanaged(pcr->pci, !pcr->msi_en);
 
     return 0;
 }
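Review note: the target here is different — rtsx moves to pci_intx_unmanaged() rather than pcim_intx(). Both toggle PCI_COMMAND_INTX_DISABLE the same way (the new helper is added in the pci.c hunk at the end of this series), but the unmanaged variant registers nothing with devres, which suits drivers that restore interrupt state in their own teardown paths. Roughly:

    pci_intx_unmanaged(pcr->pci, !pcr->msi_en);  /* plain register write */
    /* ... the driver itself is responsible for undoing this later ... */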
@@ -69,6 +69,9 @@
 #define PCI_ENDPOINT_TEST_FLAGS			0x2c
 #define FLAG_USE_DMA				BIT(0)
 
+#define PCI_ENDPOINT_TEST_CAPS			0x30
+#define CAP_UNALIGNED_ACCESS			BIT(0)
+
 #define PCI_DEVICE_ID_TI_AM654			0xb00c
 #define PCI_DEVICE_ID_TI_J7200			0xb00f
 #define PCI_DEVICE_ID_TI_AM64			0xb010
@@ -322,6 +325,91 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
     return true;
 }
 
+static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
+{
+    u32 val;
+
+    /* Keep the BAR pattern in the top byte. */
+    val = bar_test_pattern[barno] & 0xff000000;
+    /* Store the (partial) offset in the remaining bytes. */
+    val |= offset & 0x00ffffff;
+
+    return val;
+}
+
+static bool pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
+                                             enum pci_barno barno)
+{
+    struct pci_dev *pdev = test->pdev;
+    int j, size;
+
+    size = pci_resource_len(pdev, barno);
+
+    if (barno == test->test_reg_bar)
+        size = 0x4;
+
+    for (j = 0; j < size; j += 4)
+        writel_relaxed(bar_test_pattern_with_offset(barno, j),
+                       test->bar[barno] + j);
+
+    return true;
+}
+
+static bool pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
+                                            enum pci_barno barno)
+{
+    struct pci_dev *pdev = test->pdev;
+    struct device *dev = &pdev->dev;
+    int j, size;
+    u32 val;
+
+    size = pci_resource_len(pdev, barno);
+
+    if (barno == test->test_reg_bar)
+        size = 0x4;
+
+    for (j = 0; j < size; j += 4) {
+        u32 expected = bar_test_pattern_with_offset(barno, j);
+
+        val = readl_relaxed(test->bar[barno] + j);
+        if (val != expected) {
+            dev_err(dev,
+                    "BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
+                    barno, j, val, expected);
+            return false;
+        }
+    }
+
+    return true;
+}
+
+static bool pci_endpoint_test_bars(struct pci_endpoint_test *test)
+{
+    enum pci_barno bar;
+    bool ret;
+
+    /* Write all BARs in order (without reading). */
+    for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+        if (test->bar[bar])
+            pci_endpoint_test_bars_write_bar(test, bar);
+
+    /*
+     * Read all BARs in order (without writing).
+     * If there is an address translation issue on the EP, writing one BAR
+     * might have overwritten another BAR. Ensure that this is not the case.
+     * (Reading back the BAR directly after writing can not detect this.)
+     */
+    for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+        if (test->bar[bar]) {
+            ret = pci_endpoint_test_bars_read_bar(test, bar);
+            if (!ret)
+                return ret;
+        }
+    }
+
+    return true;
+}
+
 static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
 {
     u32 val;
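Review note: the offset encoding above is what lets this test catch inbound address-translation bugs — the top byte of every written word identifies the BAR and the low 24 bits carry the offset, so a write through BAR n that lands in BAR m's backing memory becomes visible when BAR m is read back in the second pass. A worked example of the helper's arithmetic, using an illustrative pattern value rather than the driver's real bar_test_pattern table:

    /* assume bar_test_pattern[1] == 0xa1a1a1a1 (illustrative value) */
    u32 val = (0xa1a1a1a1 & 0xff000000) | (0x123456 & 0x00ffffff);
    /* val == 0xa1123456: "BAR 1, offset 0x123456" */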
@@ -768,6 +856,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
             goto ret;
         ret = pci_endpoint_test_bar(test, bar);
         break;
+    case PCITEST_BARS:
+        ret = pci_endpoint_test_bars(test);
+        break;
     case PCITEST_INTX_IRQ:
         ret = pci_endpoint_test_intx_irq(test);
         break;
@@ -805,6 +896,20 @@ static const struct file_operations pci_endpoint_test_fops = {
     .unlocked_ioctl = pci_endpoint_test_ioctl,
 };
 
+static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
+{
+    struct pci_dev *pdev = test->pdev;
+    struct device *dev = &pdev->dev;
+    u32 caps;
+
+    caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
+    dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", caps);
+
+    /* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
+    if (caps & CAP_UNALIGNED_ACCESS)
+        test->alignment = 0;
+}
+
 static int pci_endpoint_test_probe(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
 {
@@ -906,6 +1011,8 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
         goto err_kfree_test_name;
     }
 
+    pci_endpoint_test_get_capabilities(test);
+
     misc_device = &test->miscdev;
     misc_device->minor = MISC_DYNAMIC_MINOR;
     misc_device->name = kstrdup(name, GFP_KERNEL);
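Review note: this is a small capability handshake — the endpoint function publishes a bitmask in its test-register BAR (see the matching pci-epf-test hunk further down, which fills reg->caps) and the host driver reads it once at probe. Clearing test->alignment when CAP_UNALIGNED_ACCESS is set means buffer offsets no longer need artificial alignment for that endpoint; a consumer would branch along these lines (a sketch, not the driver's exact code):

    if (caps & CAP_UNALIGNED_ACCESS)
        test->alignment = 0;    /* EP handles unaligned addresses itself */
    /* else keep the alignment that came in via the id table defaults */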
@@ -327,7 +327,7 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
         goto err_out;
     }
 
-    pci_intx(dev, 1);
+    pci_intx_unmanaged(dev, 1);
 
     fm = tifm_alloc_adapter(dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM
                             ? 4 : 2, &dev->dev);
@@ -368,7 +368,7 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
 err_out_free:
     tifm_free_adapter(fm);
 err_out_int:
-    pci_intx(dev, 0);
+    pci_intx_unmanaged(dev, 0);
     pci_release_regions(dev);
 err_out:
     if (!pci_dev_busy)
@@ -392,7 +392,7 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
         tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt));
 
     iounmap(fm->addr);
-    pci_intx(dev, 0);
+    pci_intx_unmanaged(dev, 0);
     pci_release_regions(dev);
 
     pci_disable_device(dev);
@@ -204,7 +204,7 @@ static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
 
     if (!priv->msi_enabled) {
         pr_warn("legacy PCIE interrupts enabled\n");
-        pci_intx(pdev, 1);
+        pcim_intx(pdev, 1);
     }
 }
 
@@ -791,7 +791,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
 err_msi_enable:
 
     /* Try to set up intx irq */
-    pci_intx(pdev, 1);
+    pci_intx_unmanaged(pdev, 1);
 
     rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                      "ndev_irq_isr", ndev);
@@ -831,7 +831,7 @@ static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
         if (pci_dev_msi_enabled(pdev))
             pci_disable_msi(pdev);
         else
-            pci_intx(pdev, 0);
+            pci_intx_unmanaged(pdev, 0);
     }
 }
 
@@ -445,7 +445,7 @@ int ndev_init_isr(struct intel_ntb_dev *ndev,
 
     /* Try to set up intx irq */
 
-    pci_intx(pdev, 1);
+    pci_intx_unmanaged(pdev, 1);
 
     rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                      "ndev_irq_isr", ndev);
@@ -369,9 +369,22 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
     return 0;
 }
 
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+    .linkup_notifier = false,
+    .msi_capable = true,
+    .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
+{
+    return &artpec6_pcie_epc_features;
+}
+
 static const struct dw_pcie_ep_ops pcie_ep_ops = {
     .init = artpec6_pcie_ep_init,
     .raise_irq = artpec6_pcie_raise_irq,
+    .get_features = artpec6_pcie_get_features,
 };
 
 static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -128,7 +128,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 }
 
 static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
-                                  dma_addr_t cpu_addr, enum pci_barno bar)
+                                  dma_addr_t cpu_addr, enum pci_barno bar,
+                                  size_t size)
 {
     int ret;
     u32 free_win;
@@ -145,7 +146,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
     }
 
     ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
-                                      cpu_addr, bar);
+                                      cpu_addr, bar, size);
     if (ret < 0) {
         dev_err(pci->dev, "Failed to program IB window\n");
         return ret;
@@ -222,20 +223,31 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
     if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
         return -EINVAL;
 
+    /*
+     * Certain EPF drivers dynamically change the physical address of a BAR
+     * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+     * calling clear_bar() would clear the BAR's PCI address assigned by the
+     * host).
+     */
+    if (ep->epf_bar[bar]) {
+        /*
+         * We can only dynamically change a BAR if the new BAR size and
+         * BAR flags do not differ from the existing configuration.
+         */
+        if (ep->epf_bar[bar]->barno != bar ||
+            ep->epf_bar[bar]->size != size ||
+            ep->epf_bar[bar]->flags != flags)
+            return -EINVAL;
+
+        /*
+         * When dynamically changing a BAR, skip writing the BAR reg, as
+         * that would clear the BAR's PCI address assigned by the host.
+         */
+        goto config_atu;
+    }
+
     reg = PCI_BASE_ADDRESS_0 + (4 * bar);
 
-    if (!(flags & PCI_BASE_ADDRESS_SPACE))
-        type = PCIE_ATU_TYPE_MEM;
-    else
-        type = PCIE_ATU_TYPE_IO;
-
-    ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
-    if (ret)
-        return ret;
-
-    if (ep->epf_bar[bar])
-        return 0;
-
     dw_pcie_dbi_ro_wr_en(pci);
 
     dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
@@ -246,9 +258,21 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
         dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
     }
 
-    ep->epf_bar[bar] = epf_bar;
     dw_pcie_dbi_ro_wr_dis(pci);
 
+config_atu:
+    if (!(flags & PCI_BASE_ADDRESS_SPACE))
+        type = PCIE_ATU_TYPE_MEM;
+    else
+        type = PCIE_ATU_TYPE_IO;
+
+    ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+                                 size);
+    if (ret)
+        return ret;
+
+    ep->epf_bar[bar] = epf_bar;
+
     return 0;
 }
 
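Review note: the restructuring lets an EPF driver re-target an already-configured BAR — a second set_bar() call skips the dbi BAR register write (which would wipe the host-assigned PCI address) and only reprograms the inbound ATU, and it refuses the request when size or flags differ from the first call, since those are host-visible properties. The guard, in isolation:

    if (ep->epf_bar[bar] &&
        (ep->epf_bar[bar]->size != size || ep->epf_bar[bar]->flags != flags))
        return -EINVAL;    /* only the physical address may change */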
@@ -597,11 +597,12 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
 }
 
 int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-                                int type, u64 cpu_addr, u8 bar)
+                                int type, u64 cpu_addr, u8 bar, size_t size)
 {
     u32 retries, val;
 
-    if (!IS_ALIGNED(cpu_addr, pci->region_align))
+    if (!IS_ALIGNED(cpu_addr, pci->region_align) ||
+        !IS_ALIGNED(cpu_addr, size))
         return -EINVAL;
 
     dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
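Review note: the added IS_ALIGNED(cpu_addr, size) check appears intended to enforce that a BAR's backing address is aligned to the BAR's own size, which the inbound window needs once BARs can be re-targeted at runtime. A quick numeric illustration (values invented for the example):

    u64 cpu_addr = 0x100000;    /* 1 MiB target address */
    size_t size  = 0x200000;    /* 2 MiB BAR */
    /* IS_ALIGNED(0x100000, 0x200000) is false -> -EINVAL, as intended */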
@@ -491,7 +491,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
 int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
                              u64 cpu_addr, u64 pci_addr, u64 size);
 int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-                                int type, u64 cpu_addr, u8 bar);
+                                int type, u64 cpu_addr, u8 bar, size_t size);
 void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
 void dw_pcie_setup(struct dw_pcie *pci);
 void dw_pcie_iatu_detect(struct dw_pcie *pci);
@@ -389,6 +389,34 @@ static const struct dw_pcie_ops dw_pcie_ops = {
     .stop_link = rockchip_pcie_stop_link,
 };
 
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+    struct rockchip_pcie *rockchip = arg;
+    struct dw_pcie *pci = &rockchip->pci;
+    struct dw_pcie_rp *pp = &pci->pp;
+    struct device *dev = pci->dev;
+    u32 reg, val;
+
+    reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+    rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+    dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+    dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+    if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+        val = rockchip_pcie_get_ltssm(rockchip);
+        if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+            dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+            /* Rescan the bus to enumerate endpoint devices */
+            pci_lock_rescan_remove();
+            pci_rescan_bus(pp->bridge->bus);
+            pci_unlock_rescan_remove();
+        }
+    }
+
+    return IRQ_HANDLED;
+}
+
 static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
 {
     struct rockchip_pcie *rockchip = arg;
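Review note: the new RC-mode handler mirrors the existing EP one — a threaded interrupt (registered IRQF_ONESHOT with no hard handler, per the next hunk) that acks PCIE_CLIENT_INTR_STATUS_MISC and, on a link-up transition, takes the rescan lock and re-enumerates the bridge's bus so endpoints that trained after probe get discovered. Reduced to its skeleton (condition and bus pointer are placeholders):

    static irqreturn_t demo_sys_irq_thread(int irq, void *arg)
    {
        /* read + write-1-to-clear the interrupt status register */
        if (link_came_up) {
            pci_lock_rescan_remove();
            pci_rescan_bus(bridge_bus);
            pci_unlock_rescan_remove();
        }
        return IRQ_HANDLED;
    }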
@@ -418,14 +446,31 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
     return IRQ_HANDLED;
 }
 
-static int rockchip_pcie_configure_rc(struct rockchip_pcie *rockchip)
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+                                      struct rockchip_pcie *rockchip)
 {
+    struct device *dev = &pdev->dev;
     struct dw_pcie_rp *pp;
+    int irq, ret;
     u32 val;
 
     if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
         return -ENODEV;
 
+    irq = platform_get_irq_byname(pdev, "sys");
+    if (irq < 0) {
+        dev_err(dev, "missing sys IRQ resource\n");
+        return irq;
+    }
+
+    ret = devm_request_threaded_irq(dev, irq, NULL,
+                                    rockchip_pcie_rc_sys_irq_thread,
+                                    IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+    if (ret) {
+        dev_err(dev, "failed to request PCIe sys IRQ\n");
+        return ret;
+    }
+
     /* LTSSM enable control mode */
     val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
     rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
@@ -436,7 +481,16 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
     pp = &rockchip->pci.pp;
     pp->ops = &rockchip_pcie_host_ops;
 
-    return dw_pcie_host_init(pp);
+    ret = dw_pcie_host_init(pp);
+    if (ret) {
+        dev_err(dev, "failed to initialize host\n");
+        return ret;
+    }
+
+    /* unmask DLL up/down indicator */
+    rockchip_pcie_writel_apb(rockchip, 0x20000, PCIE_CLIENT_INTR_MASK_MISC);
+
+    return ret;
 }
 
 static int rockchip_pcie_configure_ep(struct platform_device *pdev,
@@ -457,7 +511,7 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
 
     ret = devm_request_threaded_irq(dev, irq, NULL,
                                     rockchip_pcie_ep_sys_irq_thread,
-                                    IRQF_ONESHOT, "pcie-sys", rockchip);
+                                    IRQF_ONESHOT, "pcie-sys-ep", rockchip);
     if (ret) {
         dev_err(dev, "failed to request PCIe sys IRQ\n");
         return ret;
@@ -553,7 +607,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 
     switch (data->mode) {
     case DW_PCIE_RC_TYPE:
-        ret = rockchip_pcie_configure_rc(rockchip);
+        ret = rockchip_pcie_configure_rc(pdev, rockchip);
         if (ret)
             goto deinit_clk;
         break;
@@ -125,18 +125,30 @@
 
 #define MAX_NUM_PHY_RESETS		3
 
+#define PCIE_MTK_RESET_TIME_US		10
+
 /* Time in ms needed to complete PCIe reset on EN7581 SoC */
 #define PCIE_EN7581_RESET_TIME_MS	100
 
-struct mtk_gen3_pcie;
-
 #define PCIE_CONF_LINK2_CTL_STS		(PCIE_CFG_OFFSET_ADDR + 0xb0)
 #define PCIE_CONF_LINK2_LCR2_LINK_SPEED	GENMASK(3, 0)
 
+struct mtk_gen3_pcie;
+
+enum mtk_gen3_pcie_flags {
+    SKIP_PCIE_RSTB = BIT(0), /*
+                              * Skip PCIE_RSTB signals configuration
+                              * during device probing or suspend/resume
+                              * phase in order to avoid hardware
+                              * bugs/issues.
+                              */
+};
+
 /**
  * struct mtk_gen3_pcie_pdata - differentiate between host generations
  * @power_up: pcie power_up callback
  * @phy_resets: phy reset lines SoC data.
+ * @flags: pcie device flags.
  */
 struct mtk_gen3_pcie_pdata {
     int (*power_up)(struct mtk_gen3_pcie *pcie);
@@ -144,6 +156,7 @@ struct mtk_gen3_pcie_pdata {
         const char *id[MAX_NUM_PHY_RESETS];
         int num_resets;
     } phy_resets;
+    u32 flags;
 };
 
 /**
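Review note: with the flags word in place, SoC quirks become data rather than version checks — a platform entry opts in and the shared code paths test the bit, as the later hunks in this file do. A hypothetical opt-in entry, for illustration only:

    static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_demo = {
        .power_up = mtk_pcie_en7581_power_up,
        .flags = SKIP_PCIE_RSTB,    /* skip PCIE_RSTB handling, see below */
    };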
@@ -438,22 +451,34 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
     val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
     writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
 
-    /* Assert all reset signals */
-    val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
-    val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
-    writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
     /*
-     * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
-     * and 2.2.1 (Initial Power-Up (G3 to S0)).
-     * The deassertion of PERST# should be delayed 100ms (TPVPERL)
-     * for the power and clock to become stable.
+     * Airoha EN7581 has a hardware bug asserting/releasing PCIE_PE_RSTB
+     * signal causing occasional PCIe link down. In order to overcome the
+     * issue, PCIE_RSTB signals are not asserted/released at this stage
+     * and the PCIe block is reset configuting REG_PCI_CONTROL (0x88) and
+     * REG_RESET_CONTROL (0x834) registers available in the clock module
+     * running clk_bulk_prepare_enable in mtk_pcie_en7581_power_up().
      */
-    msleep(100);
-
-    /* De-assert reset signals */
-    val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
-    writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+    if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+        /* Assert all reset signals */
+        val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+        val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+               PCIE_PE_RSTB;
+        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+        /*
+         * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
+         * and 2.2.1 (Initial Power-Up (G3 to S0)).
+         * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+         * for the power and clock to become stable.
+         */
+        msleep(PCIE_T_PVPERL_MS);
+
+        /* De-assert reset signals */
+        val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+                 PCIE_PE_RSTB);
+        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+    }
 
     /* Check if the link is up or not */
     err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
@@ -913,11 +938,20 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
     u32 val;
 
     /*
-     * Wait for the time needed to complete the bulk assert in
-     * mtk_pcie_setup for EN7581 SoC.
+     * The controller may have been left out of reset by the bootloader
+     * so make sure that we get a clean start by asserting resets here.
      */
-    mdelay(PCIE_EN7581_RESET_TIME_MS);
+    reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+                              pcie->phy_resets);
+    reset_control_assert(pcie->mac_reset);
+
+    /* Wait for the time needed to complete the reset lines assert. */
+    msleep(PCIE_EN7581_RESET_TIME_MS);
 
     /*
      * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
     * requires PHY initialization and power-on before PHY reset deassert.
     */
     err = phy_init(pcie->phy);
     if (err) {
         dev_err(dev, "failed to initialize PHY\n");
@@ -940,17 +974,14 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
     * Wait for the time needed to complete the bulk de-assert above.
     * This time is specific for EN7581 SoC.
     */
-    mdelay(PCIE_EN7581_RESET_TIME_MS);
+    msleep(PCIE_EN7581_RESET_TIME_MS);
 
     /* MAC power on and enable transaction layer clocks */
     reset_control_deassert(pcie->mac_reset);
 
     pm_runtime_enable(dev);
     pm_runtime_get_sync(dev);
 
-    err = clk_bulk_prepare(pcie->num_clks, pcie->clks);
-    if (err) {
-        dev_err(dev, "failed to prepare clock\n");
-        goto err_clk_prepare;
-    }
-
     val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
           FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
           FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
@@ -963,19 +994,25 @@ static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
           FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
     writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
 
-    err = clk_bulk_enable(pcie->num_clks, pcie->clks);
+    err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
     if (err) {
         dev_err(dev, "failed to prepare clock\n");
-        goto err_clk_enable;
+        goto err_clk_prepare_enable;
     }
 
+    /*
+     * Airoha EN7581 performs PCIe reset via clk callabacks since it has a
+     * hw issue with PCIE_PE_RSTB signal. Add wait for the time needed to
+     * complete the PCIe reset.
+     */
+    msleep(PCIE_T_PVPERL_MS);
+
     return 0;
 
-err_clk_enable:
-    clk_bulk_unprepare(pcie->num_clks, pcie->clks);
-err_clk_prepare:
+err_clk_prepare_enable:
     pm_runtime_put_sync(dev);
     pm_runtime_disable(dev);
     reset_control_assert(pcie->mac_reset);
     reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
 err_phy_deassert:
     phy_power_off(pcie->phy);
@@ -990,6 +1027,15 @@ static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
     struct device *dev = pcie->dev;
     int err;
 
+    /*
+     * The controller may have been left out of reset by the bootloader
+     * so make sure that we get a clean start by asserting resets here.
+     */
+    reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+                              pcie->phy_resets);
+    reset_control_assert(pcie->mac_reset);
+    usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
+
     /* PHY power on and enable pipe clock */
     err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
     if (err) {
@@ -1074,14 +1120,6 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
     * counter since the bulk is shared.
     */
     reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-    /*
-     * The controller may have been left out of reset by the bootloader
-     * so make sure that we get a clean start by asserting resets here.
-     */
-    reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
-
-    reset_control_assert(pcie->mac_reset);
-    usleep_range(10, 20);
 
     /* Don't touch the hardware registers before power up */
     err = pcie->soc->power_up(pcie);
@@ -1231,10 +1269,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
         return err;
     }
 
-    /* Pull down the PERST# pin */
-    val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
-    val |= PCIE_PE_RSTB;
-    writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+    if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+        /* Pull down the PERST# pin */
+        val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+        val |= PCIE_PE_RSTB;
+        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+    }
 
     dev_dbg(pcie->dev, "entered L2 states successfully");
 
@@ -1285,6 +1325,7 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
         .id[2] = "phy-lane2",
         .num_resets = 3,
     },
+    .flags = SKIP_PCIE_RSTB,
 };
 
 static const struct of_device_id mtk_pcie_of_match[] = {
|
@ -40,6 +40,10 @@
|
||||
* @irq_pci_fn: the latest PCI function that has updated the mapping of
|
||||
* the MSI/INTX IRQ dedicated outbound region.
|
||||
* @irq_pending: bitmask of asserted INTX IRQs.
|
||||
* @perst_irq: IRQ used for the PERST# signal.
|
||||
* @perst_asserted: True if the PERST# signal was asserted.
|
||||
* @link_up: True if the PCI link is up.
|
||||
* @link_training: Work item to execute PCI link training.
|
||||
*/
|
||||
struct rockchip_pcie_ep {
|
||||
struct rockchip_pcie rockchip;
|
||||
|
@@ -30,11 +30,14 @@
 #define XILINX_CPM_PCIE_REG_IDRN_MASK	0x00000E3C
 #define XILINX_CPM_PCIE_MISC_IR_STATUS	0x00000340
 #define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348
-#define XILINX_CPM_PCIE_MISC_IR_LOCAL	BIT(1)
+#define XILINX_CPM_PCIE0_MISC_IR_LOCAL	BIT(1)
+#define XILINX_CPM_PCIE1_MISC_IR_LOCAL	BIT(2)
 
-#define XILINX_CPM_PCIE_IR_STATUS	0x000002A0
-#define XILINX_CPM_PCIE_IR_ENABLE	0x000002A8
-#define XILINX_CPM_PCIE_IR_LOCAL	BIT(0)
+#define XILINX_CPM_PCIE0_IR_STATUS	0x000002A0
+#define XILINX_CPM_PCIE1_IR_STATUS	0x000002B4
+#define XILINX_CPM_PCIE0_IR_ENABLE	0x000002A8
+#define XILINX_CPM_PCIE1_IR_ENABLE	0x000002BC
+#define XILINX_CPM_PCIE_IR_LOCAL	BIT(0)
 
 #define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
 
||||
@ -80,14 +83,21 @@
|
||||
enum xilinx_cpm_version {
|
||||
CPM,
|
||||
CPM5,
|
||||
CPM5_HOST1,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct xilinx_cpm_variant - CPM variant information
|
||||
* @version: CPM version
|
||||
* @ir_status: Offset for the error interrupt status register
|
||||
* @ir_enable: Offset for the CPM5 local error interrupt enable register
|
||||
* @ir_misc_value: A bitmask for the miscellaneous interrupt status
|
||||
*/
|
||||
struct xilinx_cpm_variant {
|
||||
enum xilinx_cpm_version version;
|
||||
u32 ir_status;
|
||||
u32 ir_enable;
|
||||
u32 ir_misc_value;
|
||||
};
|
||||
|
||||
/**
|
||||
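Review note: moving the register offsets and bitmasks into the variant struct replaces scattered "version == CPM5" checks with data chosen at probe time, presumably through the standard OF match pattern:

    port->variant = of_device_get_match_data(dev);  /* -> cpm_host, cpm5_host, ... */

The cpm5_host1 variant added below then differs only in which IR/MISC registers it points at.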
@@ -269,6 +279,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
 {
     struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
     struct irq_chip *chip = irq_desc_get_chip(desc);
+    const struct xilinx_cpm_variant *variant = port->variant;
     unsigned long val;
     int i;
 
@@ -279,11 +290,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
         generic_handle_domain_irq(port->cpm_domain, i);
     pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
 
-    if (port->variant->version == CPM5) {
-        val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+    if (variant->ir_status) {
+        val = readl_relaxed(port->cpm_base + variant->ir_status);
         if (val)
             writel_relaxed(val, port->cpm_base +
-                           XILINX_CPM_PCIE_IR_STATUS);
+                           variant->ir_status);
     }
 
     /*
@@ -465,6 +476,8 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
  */
 static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
 {
+    const struct xilinx_cpm_variant *variant = port->variant;
+
     if (cpm_pcie_link_up(port))
         dev_info(port->dev, "PCIe Link is UP\n");
     else
@@ -483,15 +496,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
     * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
     * CPM SLCR block.
     */
-    writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+    writel(variant->ir_misc_value,
            port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
 
-    if (port->variant->version == CPM5) {
+    if (variant->ir_enable) {
         writel(XILINX_CPM_PCIE_IR_LOCAL,
-               port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+               port->cpm_base + variant->ir_enable);
     }
 
-    /* Enable the Bridge enable bit */
+    /* Set Bridge enable bit */
     pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
                XILINX_CPM_PCIE_REG_RPSC_BEN,
                XILINX_CPM_PCIE_REG_RPSC);
@@ -609,10 +622,21 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
 
 static const struct xilinx_cpm_variant cpm_host = {
     .version = CPM,
+    .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
 };
 
 static const struct xilinx_cpm_variant cpm5_host = {
     .version = CPM5,
+    .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
+    .ir_status = XILINX_CPM_PCIE0_IR_STATUS,
+    .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5_host1 = {
+    .version = CPM5_HOST1,
+    .ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
+    .ir_status = XILINX_CPM_PCIE1_IR_STATUS,
+    .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
 };
 
 static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
@@ -624,6 +648,10 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
         .compatible = "xlnx,versal-cpm5-host",
         .data = &cpm5_host,
     },
+    {
+        .compatible = "xlnx,versal-cpm5-host1",
+        .data = &cpm5_host1,
+    },
     {}
 };
 
@@ -101,7 +101,7 @@ static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
  * @bar: BAR the range is within
  * @offset: offset from the BAR's start address
  * @maxlen: length in bytes, beginning at @offset
- * @name: name associated with the request
+ * @name: name of the driver requesting the resource
  * @req_flags: flags for the request, e.g., for kernel-exclusive requests
 *
 * Returns: 0 on success, a negative error code on failure.
@@ -411,31 +411,12 @@ static inline bool mask_contains_bar(int mask, int bar)
     return mask & BIT(bar);
 }
 
-/*
- * This is a copy of pci_intx() used to bypass the problem of recursive
- * function calls due to the hybrid nature of pci_intx().
- */
-static void __pcim_intx(struct pci_dev *pdev, int enable)
-{
-    u16 pci_command, new;
-
-    pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
-
-    if (enable)
-        new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
-    else
-        new = pci_command | PCI_COMMAND_INTX_DISABLE;
-
-    if (new != pci_command)
-        pci_write_config_word(pdev, PCI_COMMAND, new);
-}
-
 static void pcim_intx_restore(struct device *dev, void *data)
 {
     struct pci_dev *pdev = to_pci_dev(dev);
     struct pcim_intx_devres *res = data;
 
-    __pcim_intx(pdev, res->orig_intx);
+    pci_intx_unmanaged(pdev, res->orig_intx);
 }
 
 static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
@@ -472,10 +453,11 @@ int pcim_intx(struct pci_dev *pdev, int enable)
         return -ENOMEM;
 
     res->orig_intx = !enable;
-    __pcim_intx(pdev, enable);
+    pci_intx_unmanaged(pdev, enable);
 
     return 0;
 }
+EXPORT_SYMBOL_GPL(pcim_intx);
 
 static void pcim_disable_device(void *pdev_raw)
 {
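Review note: after this pair of hunks, pcim_intx() is the single managed entry point (now exported for modules) and its devres callback reuses the new pci_intx_unmanaged() instead of carrying a private copy of the config-space toggle. From a caller's perspective the semantics are:

    pcim_intx(pdev, 1);   /* enable now; devres records "was disabled" */
    /* ... driver unbinds ... */
    /* devres invokes pci_intx_unmanaged(pdev, 0) to restore the old state */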
@@ -723,7 +705,7 @@ EXPORT_SYMBOL(pcim_iounmap);
  * pcim_iomap_region - Request and iomap a PCI BAR
  * @pdev: PCI device to map IO resources for
  * @bar: Index of a BAR to map
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
 *
 * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
 *
@@ -790,7 +772,7 @@ EXPORT_SYMBOL(pcim_iounmap_region);
  * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
  * @pdev: PCI device to map IO resources for
  * @mask: Mask of BARs to request and iomap
- * @name: Name associated with the requests
+ * @name: Name of the driver requesting the resources
 *
 * Returns: 0 on success, negative error code on failure.
 *
@@ -855,9 +837,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
 
 /**
  * pcim_request_region - Request a PCI BAR
- * @pdev: PCI device to requestion region for
+ * @pdev: PCI device to request region for
  * @bar: Index of BAR to request
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
 *
 * Returns: 0 on success, a negative error code on failure.
 *
@@ -874,9 +856,9 @@ EXPORT_SYMBOL(pcim_request_region);
 
 /**
  * pcim_request_region_exclusive - Request a PCI BAR exclusively
- * @pdev: PCI device to requestion region for
+ * @pdev: PCI device to request region for
  * @bar: Index of BAR to request
- * @name: Name associated with the request
+ * @name: Name of the driver requesting the resource
 *
 * Returns: 0 on success, a negative error code on failure.
 *
@@ -932,7 +914,7 @@ static void pcim_release_all_regions(struct pci_dev *pdev)
 /**
  * pcim_request_all_regions - Request all regions
  * @pdev: PCI device to map IO resources for
- * @name: name associated with the request
+ * @name: name of the driver requesting the resources
 *
 * Returns: 0 on success, negative error code on failure.
 *
@@ -44,6 +44,8 @@
 
 #define TIMER_RESOLUTION		1
 
+#define CAP_UNALIGNED_ACCESS		BIT(0)
+
 static struct workqueue_struct *kpcitest_workqueue;
 
 struct pci_epf_test {
@@ -74,6 +76,7 @@ struct pci_epf_test_reg {
     u32 irq_type;
     u32 irq_number;
     u32 flags;
+    u32 caps;
 } __packed;
 
 static struct pci_epf_header test_header = {
@@ -739,6 +742,20 @@ static void pci_epf_test_clear_bar(struct pci_epf *epf)
     }
 }
 
+static void pci_epf_test_set_capabilities(struct pci_epf *epf)
+{
+    struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+    enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+    struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+    struct pci_epc *epc = epf->epc;
+    u32 caps = 0;
+
+    if (epc->ops->align_addr)
+        caps |= CAP_UNALIGNED_ACCESS;
+
+    reg->caps = cpu_to_le32(caps);
+}
+
 static int pci_epf_test_epc_init(struct pci_epf *epf)
 {
     struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -763,6 +780,8 @@ static int pci_epf_test_epc_init(struct pci_epf *epf)
         }
     }
 
+    pci_epf_test_set_capabilities(epf);
+
     ret = pci_epf_test_set_bar(epf);
     if (ret)
         return ret;
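Review note: the cpu_to_le32() on the store pins the capability word to a defined byte order — the test BAR is read by the host with readl(), which is little-endian by definition, so a big-endian endpoint must swap here for the two sides to agree. The pairing, host side shown for contrast:

    reg->caps = cpu_to_le32(caps);                                 /* EP side, this hunk */
    caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);  /* host side, earlier hunk */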
@@ -60,26 +60,17 @@ struct pci_epc *pci_epc_get(const char *epc_name)
     int ret = -EINVAL;
     struct pci_epc *epc;
     struct device *dev;
-    struct class_dev_iter iter;
 
-    class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
-    while ((dev = class_dev_iter_next(&iter))) {
-        if (strcmp(epc_name, dev_name(dev)))
-            continue;
+    dev = class_find_device_by_name(&pci_epc_class, epc_name);
+    if (!dev)
+        goto err;
 
-        epc = to_pci_epc(dev);
-        if (!try_module_get(epc->ops->owner)) {
-            ret = -EINVAL;
-            goto err;
-        }
-
-        class_dev_iter_exit(&iter);
-        get_device(&epc->dev);
-        return epc;
-    }
+    epc = to_pci_epc(dev);
+    if (try_module_get(epc->ops->owner))
+        return epc;
 
 err:
-    class_dev_iter_exit(&iter);
+    put_device(dev);
     return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(pci_epc_get);
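Review note: the rewrite leans on two lookup/refcount properties — class_find_device_by_name() already takes a reference on the device it returns, so the explicit get_device() goes away, and put_device(NULL) is a no-op, so the not-found path can share the single err label. Compact restatement:

    dev = class_find_device_by_name(&pci_epc_class, epc_name); /* ref'd or NULL */
    if (!dev)
        goto err;    /* put_device(NULL) below is harmless */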
@@ -609,10 +600,20 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
 int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
                     struct pci_epf_bar *epf_bar)
 {
-    int ret;
+    const struct pci_epc_features *epc_features;
     enum pci_barno bar = epf_bar->barno;
     int flags = epf_bar->flags;
+    int ret;
 
     if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
         return -EINVAL;
 
+    epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+    if (!epc_features)
+        return -EINVAL;
+
+    if (epc_features->bar[bar].type == BAR_FIXED &&
+        (epc_features->bar[bar].fixed_size != epf_bar->size))
+        return -EINVAL;
+
+    if (!is_power_of_2(epf_bar->size))
+        return -EINVAL;
+
     if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
|
||||
{
|
||||
int r;
|
||||
|
||||
r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
|
||||
r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
|
||||
epc);
|
||||
dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
|
||||
}
|
||||
|
@@ -202,6 +202,7 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
 
     mutex_lock(&epf_pf->lock);
     clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+    epf_vf->epf_pf = NULL;
     list_del(&epf_vf->list);
     mutex_unlock(&epf_pf->lock);
 }
@@ -84,7 +84,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
 static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
 static int ibm_get_table_from_acpi(char **bufp);
 static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
-                                   struct bin_attribute *bin_attr,
+                                   const struct bin_attribute *bin_attr,
                                    char *buffer, loff_t pos, size_t size);
 static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
                 u32 lvl, void *context, void **rv);
@@ -98,7 +98,7 @@ static struct bin_attribute ibm_apci_table_attr __ro_after_init = {
         .name = "apci_table",
         .mode = S_IRUGO,
     },
-    .read = ibm_read_apci_table,
+    .read_new = ibm_read_apci_table,
     .write = NULL,
 };
 static struct acpiphp_attention_info ibm_attention_info =
@@ -353,7 +353,7 @@ static int ibm_get_table_from_acpi(char **bufp)
 * our solution is to only allow reading the table in all at once.
 */
 static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
-                                   struct bin_attribute *bin_attr,
+                                   const struct bin_attribute *bin_attr,
                                    char *buffer, loff_t pos, size_t size)
 {
     int bytes_read = -EINVAL;
@@ -289,7 +289,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
             */
             if (affd)
                 irq_create_affinity_masks(1, affd);
-            pci_intx(dev, 1);
+            pci_intx_unmanaged(dev, 1);
             return 1;
         }
     }
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg);
 static void pci_intx_for_msi(struct pci_dev *dev, int enable)
 {
     if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
-        pci_intx(dev, enable);
+        pci_intx_unmanaged(dev, enable);
 }
 
 static void pci_msi_set_enable(struct pci_dev *dev, int enable)
@@ -161,7 +161,7 @@ static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
     return ret;
 }
 
-static struct bin_attribute p2pmem_alloc_attr = {
+static const struct bin_attribute p2pmem_alloc_attr = {
     .attr = { .name = "allocate", .mode = 0660 },
     .mmap = p2pmem_alloc_mmap,
     /*
@@ -180,14 +180,14 @@ static struct attribute *p2pmem_attrs[] = {
     NULL,
 };
 
-static struct bin_attribute *p2pmem_bin_attrs[] = {
+static const struct bin_attribute *const p2pmem_bin_attrs[] = {
     &p2pmem_alloc_attr,
     NULL,
 };
 
 static const struct attribute_group p2pmem_group = {
     .attrs = p2pmem_attrs,
-    .bin_attrs = p2pmem_bin_attrs,
+    .bin_attrs_new = p2pmem_bin_attrs,
     .name = "p2pmem",
 };
 
@@ -694,7 +694,7 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR_RO(boot_vga);
 
 static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
-                               struct bin_attribute *bin_attr, char *buf,
+                               const struct bin_attribute *bin_attr, char *buf,
                                loff_t off, size_t count)
 {
     struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -769,7 +769,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
 }
 
 static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
-                                struct bin_attribute *bin_attr, char *buf,
+                                const struct bin_attribute *bin_attr, char *buf,
                                 loff_t off, size_t count)
 {
     struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -837,9 +837,9 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
 
     return count;
 }
-static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
+static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
 
-static struct bin_attribute *pci_dev_config_attrs[] = {
+static const struct bin_attribute *const pci_dev_config_attrs[] = {
     &bin_attr_config,
     NULL,
 };
@@ -856,7 +856,7 @@ static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
 }
 
 static const struct attribute_group pci_dev_config_attr_group = {
-    .bin_attrs = pci_dev_config_attrs,
+    .bin_attrs_new = pci_dev_config_attrs,
     .bin_size = pci_dev_config_attr_bin_size,
 };
 
@@ -887,8 +887,8 @@ pci_llseek_resource(struct file *filep,
 * callback routine (pci_legacy_read).
 */
 static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
-                                  struct bin_attribute *bin_attr, char *buf,
-                                  loff_t off, size_t count)
+                                  const struct bin_attribute *bin_attr,
+                                  char *buf, loff_t off, size_t count)
 {
     struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
 
@@ -912,8 +912,8 @@ static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
 * callback routine (pci_legacy_write).
 */
 static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
-                                   struct bin_attribute *bin_attr, char *buf,
-                                   loff_t off, size_t count)
+                                   const struct bin_attribute *bin_attr,
+                                   char *buf, loff_t off, size_t count)
 {
     struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
 
@@ -1003,8 +1003,8 @@ void pci_create_legacy_files(struct pci_bus *b)
     b->legacy_io->attr.name = "legacy_io";
     b->legacy_io->size = 0xffff;
     b->legacy_io->attr.mode = 0600;
-    b->legacy_io->read = pci_read_legacy_io;
-    b->legacy_io->write = pci_write_legacy_io;
+    b->legacy_io->read_new = pci_read_legacy_io;
+    b->legacy_io->write_new = pci_write_legacy_io;
     /* See pci_create_attr() for motivation */
     b->legacy_io->llseek = pci_llseek_resource;
     b->legacy_io->mmap = pci_mmap_legacy_io;
@@ -1099,7 +1099,7 @@ static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
 }
 
 static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
-                               struct bin_attribute *attr, char *buf,
+                               const struct bin_attribute *attr, char *buf,
                                loff_t off, size_t count, bool write)
 {
 #ifdef CONFIG_HAS_IOPORT
@@ -1142,14 +1142,14 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
 }
 
 static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
-                                    struct bin_attribute *attr, char *buf,
+                                    const struct bin_attribute *attr, char *buf,
                                     loff_t off, size_t count)
 {
     return pci_resource_io(filp, kobj, attr, buf, off, count, false);
 }
 
 static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
-                                     struct bin_attribute *attr, char *buf,
+                                     const struct bin_attribute *attr, char *buf,
                                      loff_t off, size_t count)
 {
     int ret;
@@ -1210,8 +1210,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
     } else {
         sprintf(res_attr_name, "resource%d", num);
         if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
-            res_attr->read = pci_read_resource_io;
-            res_attr->write = pci_write_resource_io;
+            res_attr->read_new = pci_read_resource_io;
+            res_attr->write_new = pci_write_resource_io;
             if (arch_can_pci_mmap_io())
                 res_attr->mmap = pci_mmap_resource_uc;
         } else {
@@ -1292,7 +1292,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
 * writing anything except 0 enables it
 */
 static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
-                             struct bin_attribute *bin_attr, char *buf,
+                             const struct bin_attribute *bin_attr, char *buf,
                              loff_t off, size_t count)
 {
     struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1318,7 +1318,7 @@ static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
 * device corresponding to @kobj.
 */
 static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
-                            struct bin_attribute *bin_attr, char *buf,
+                            const struct bin_attribute *bin_attr, char *buf,
                             loff_t off, size_t count)
 {
     struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1344,9 +1344,9 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
 
     return count;
 }
-static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
+static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
 
-static struct bin_attribute *pci_dev_rom_attrs[] = {
+static const struct bin_attribute *const pci_dev_rom_attrs[] = {
     &bin_attr_rom,
     NULL,
 };
@@ -1372,7 +1372,7 @@ static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
 }
 
 static const struct attribute_group pci_dev_rom_attr_group = {
-    .bin_attrs = pci_dev_rom_attrs,
+    .bin_attrs_new = pci_dev_rom_attrs,
     .is_bin_visible = pci_dev_rom_attr_is_visible,
     .bin_size = pci_dev_rom_attr_bin_size,
 };
@ -23,7 +23,6 @@
|
||||
#include <linux/string.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/logic_pio.h>
|
||||
#include <linux/pm_wakeup.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/pci_hotplug.h>
|
||||
@ -3941,15 +3940,14 @@ EXPORT_SYMBOL(pci_release_region);
|
||||
* __pci_request_region - Reserved PCI I/O and memory resource
|
||||
* @pdev: PCI device whose resources are to be reserved
|
||||
* @bar: BAR to be reserved
|
||||
* @res_name: Name to be associated with resource.
|
||||
* @name: name of the driver requesting the resource
|
||||
* @exclusive: whether the region access is exclusive or not
|
||||
*
|
||||
* Returns: 0 on success, negative error code on failure.
|
||||
*
|
||||
* Mark the PCI region associated with PCI device @pdev BAR @bar as
|
||||
* being reserved by owner @res_name. Do not access any
|
||||
* address inside the PCI regions unless this call returns
|
||||
* successfully.
|
||||
* Mark the PCI region associated with PCI device @pdev BAR @bar as being
|
||||
* reserved by owner @name. Do not access any address inside the PCI regions
|
||||
* unless this call returns successfully.
|
||||
*
|
||||
* If @exclusive is set, then the region is marked so that userspace
|
||||
* is explicitly not allowed to map the resource via /dev/mem or
|
||||
@ -3959,13 +3957,13 @@ EXPORT_SYMBOL(pci_release_region);
|
||||
* message is also printed on failure.
|
||||
*/
|
||||
static int __pci_request_region(struct pci_dev *pdev, int bar,
|
||||
const char *res_name, int exclusive)
|
||||
const char *name, int exclusive)
|
||||
{
|
||||
if (pci_is_managed(pdev)) {
|
||||
if (exclusive == IORESOURCE_EXCLUSIVE)
|
||||
return pcim_request_region_exclusive(pdev, bar, res_name);
|
||||
return pcim_request_region_exclusive(pdev, bar, name);
|
||||
|
||||
return pcim_request_region(pdev, bar, res_name);
|
||||
return pcim_request_region(pdev, bar, name);
|
||||
}
|
||||
|
||||
if (pci_resource_len(pdev, bar) == 0)
|
||||
@ -3973,11 +3971,11 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
|
||||
|
||||
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
|
||||
if (!request_region(pci_resource_start(pdev, bar),
|
||||
pci_resource_len(pdev, bar), res_name))
|
||||
pci_resource_len(pdev, bar), name))
|
||||
goto err_out;
|
||||
} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
|
||||
if (!__request_mem_region(pci_resource_start(pdev, bar),
|
||||
pci_resource_len(pdev, bar), res_name,
|
||||
pci_resource_len(pdev, bar), name,
|
||||
exclusive))
|
||||
goto err_out;
|
||||
}
|
||||
@ -3994,14 +3992,13 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
|
||||
* pci_request_region - Reserve PCI I/O and memory resource
|
||||
* @pdev: PCI device whose resources are to be reserved
|
||||
* @bar: BAR to be reserved
|
||||
* @res_name: Name to be associated with resource
|
||||
* @name: name of the driver requesting the resource
|
||||
*
|
||||
* Returns: 0 on success, negative error code on failure.
|
||||
*
|
||||
* Mark the PCI region associated with PCI device @pdev BAR @bar as
|
||||
* being reserved by owner @res_name. Do not access any
|
||||
* address inside the PCI regions unless this call returns
|
 * successfully.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as being
 * reserved by owner @name. Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
@ -4011,9 +4008,9 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
 * when pcim_enable_device() has been called in advance. This hybrid feature is
 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
        return __pci_request_region(pdev, bar, res_name, 0);
        return __pci_request_region(pdev, bar, name, 0);
}
EXPORT_SYMBOL(pci_request_region);

@ -4036,13 +4033,13 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
EXPORT_SYMBOL(pci_release_selected_regions);

static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
                                          const char *res_name, int excl)
                                          const char *name, int excl)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++)
                if (bars & (1 << i))
                        if (__pci_request_region(pdev, i, res_name, excl))
                        if (__pci_request_region(pdev, i, name, excl))
                                goto err_out;
        return 0;

@ -4059,7 +4056,7 @@ static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 * @name: Name of the driver requesting the resources
 *
 * Returns: 0 on success, negative error code on failure.
 *
@ -4069,9 +4066,9 @@ static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
                                 const char *res_name)
                                 const char *name)
{
        return __pci_request_selected_regions(pdev, bars, res_name, 0);
        return __pci_request_selected_regions(pdev, bars, name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);

@ -4079,7 +4076,7 @@ EXPORT_SYMBOL(pci_request_selected_regions);
 * pci_request_selected_regions_exclusive - Request regions exclusively
 * @pdev: PCI device to request regions from
 * @bars: bit mask of BARs to request
 * @res_name: name to be associated with the requests
 * @name: name of the driver requesting the resources
 *
 * Returns: 0 on success, negative error code on failure.
 *
@ -4089,9 +4086,9 @@ EXPORT_SYMBOL(pci_request_selected_regions);
 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
 */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
                                           const char *res_name)
                                           const char *name)
{
        return __pci_request_selected_regions(pdev, bars, res_name,
        return __pci_request_selected_regions(pdev, bars, name,
                                              IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
@ -4114,12 +4111,11 @@ EXPORT_SYMBOL(pci_release_regions);
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 * @name: name of the driver requesting the resources
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name. Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 * Mark all PCI regions associated with PCI device @pdev as being reserved by
 * owner @name. Do not access any address inside the PCI regions unless this
 * call returns successfully.
 *
 * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
@ -4129,22 +4125,22 @@ EXPORT_SYMBOL(pci_release_regions);
 * when pcim_enable_device() has been called in advance. This hybrid feature is
 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
int pci_request_regions(struct pci_dev *pdev, const char *name)
{
        return pci_request_selected_regions(pdev,
                        ((1 << PCI_STD_NUM_BARS) - 1), res_name);
                        ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions);

/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 * @name: name of the driver requesting the resources
 *
 * Returns: 0 on success, negative error code on failure.
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name. Do not access any address inside the PCI regions
 * by owner @name. Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that /dev/mem
@ -4158,10 +4154,10 @@ EXPORT_SYMBOL(pci_request_regions);
 * when pcim_enable_device() has been called in advance. This hybrid feature is
 * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
        return pci_request_selected_regions_exclusive(pdev,
                        ((1 << PCI_STD_NUM_BARS) - 1), res_name);
                        ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);

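As context for the @res_name -> @name rename, here is a minimal sketch of how a caller might use the unmanaged request API in a probe path; the driver name "my_drv" and both functions are placeholders, not part of this patch. A managed (pcim_*) counterpart is sketched after the pci.h declarations hunk further below.

#include <linux/pci.h>

/* Hypothetical probe: request all BARs under the driver's name and
 * release them explicitly on the error path and in remove(). */
static int my_drv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, "my_drv");   /* @name, per this patch */
        if (rc) {
                pci_disable_device(pdev);
                return rc;      /* -EBUSY if another owner holds a BAR */
        }

        return 0;
}

static void my_drv_remove(struct pci_dev *pdev)
{
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}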
@ -4482,6 +4478,35 @@ void pci_disable_parity(struct pci_dev *dev)
        }
}

/**
 * pci_intx_unmanaged - enables/disables PCI INTx for device dev,
 * unmanaged version
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 *
 * This function behaves identically to pci_intx(), but is never managed with
 * devres.
 */
void pci_intx_unmanaged(struct pci_dev *pdev, int enable)
{
        u16 pci_command, new;

        pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

        if (enable)
                new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
        else
                new = pci_command | PCI_COMMAND_INTX_DISABLE;

        if (new == pci_command)
                return;

        pci_write_config_word(pdev, PCI_COMMAND, new);
}
EXPORT_SYMBOL_GPL(pci_intx_unmanaged);

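A minimal usage sketch, assuming a hypothetical driver that parks INTx while MSI is active; only pci_intx_unmanaged() itself comes from this patch.

#include <linux/pci.h>

/* Hypothetical helper: disable INTx when MSI comes up, re-enable it if
 * MSI setup fails. pci_intx_unmanaged() registers no devres callback,
 * so the caller owns any cleanup. */
static int my_drv_setup_irq(struct pci_dev *pdev)
{
        if (pci_enable_msi(pdev) == 0) {
                pci_intx_unmanaged(pdev, 0);    /* sets PCI_COMMAND_INTX_DISABLE */
                return 0;
        }

        pci_intx_unmanaged(pdev, 1);    /* clears PCI_COMMAND_INTX_DISABLE */
        return -ENODEV;
}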
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on

@ -81,24 +81,47 @@ void pci_configure_aspm_l1ss(struct pci_dev *pdev)

void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
{
        struct pci_dev *parent = pdev->bus->self;
        struct pci_cap_saved_state *save_state;
        u16 l1ss = pdev->l1ss;
        u32 *cap;

        /*
         * If this is a Downstream Port, we never restore the L1SS state
         * directly; we only restore it when we restore the state of the
         * Upstream Port below it.
         */
        if (pcie_downstream_port(pdev) || !parent)
                return;

        if (!pdev->l1ss || !parent->l1ss)
                return;

        /*
         * Save L1 substate configuration. The ASPM L0s/L1 configuration
         * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
         */
        if (!l1ss)
                return;

        save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
        if (!save_state)
                return;

        cap = &save_state->cap.data[0];
        pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
        pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
        pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
        pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);

        if (parent->state_saved)
                return;

        /*
         * Save parent's L1 substate configuration so we have it for
         * pci_restore_aspm_l1ss_state(pdev) to restore.
         */
        save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
        if (!save_state)
                return;

        cap = &save_state->cap.data[0];
        pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
        pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
}

void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
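The save order above (PCI_L1SS_CTL2 first, then PCI_L1SS_CTL1) fixes the layout of the saved-state buffer, so the restore side must consume the dwords in the same order. A standalone toy model of that invariant, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Toy model of a pci_cap_saved_state payload: two config dwords are
 * stored CTL2-first, CTL1-second, the same order used above. */
struct l1ss_saved {
        uint32_t data[2];
};

static void save(struct l1ss_saved *s, uint32_t ctl2, uint32_t ctl1)
{
        uint32_t *cap = &s->data[0];

        *cap++ = ctl2;  /* PCI_L1SS_CTL2 saved first */
        *cap++ = ctl1;  /* PCI_L1SS_CTL1 saved second */
}

static void restore(const struct l1ss_saved *s, uint32_t *ctl2, uint32_t *ctl1)
{
        const uint32_t *cap = &s->data[0];

        *ctl2 = *cap++; /* must mirror the save order */
        *ctl1 = *cap++;
}

int main(void)
{
        struct l1ss_saved s;
        uint32_t c2, c1;

        save(&s, 2, 1);
        restore(&s, &c2, &c1);
        printf("CTL2=%u CTL1=%u\n", (unsigned int)c2, (unsigned int)c1);
        return 0;
}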
@ -271,8 +271,8 @@ void pci_vpd_init(struct pci_dev *dev)
}

static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
                        struct bin_attribute *bin_attr, char *buf, loff_t off,
                        size_t count)
                        const struct bin_attribute *bin_attr, char *buf,
                        loff_t off, size_t count)
{
        struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
        struct pci_dev *vpd_dev = dev;
@ -295,8 +295,8 @@ static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
}

static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
                         struct bin_attribute *bin_attr, char *buf, loff_t off,
                         size_t count)
                         const struct bin_attribute *bin_attr, char *buf,
                         loff_t off, size_t count)
{
        struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
        struct pci_dev *vpd_dev = dev;
@ -317,9 +317,9 @@ static ssize_t vpd_write(struct file *filp, struct kobject *kobj,

        return ret;
}
static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
static const BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);

static struct bin_attribute *vpd_attrs[] = {
static const struct bin_attribute *const vpd_attrs[] = {
        &bin_attr_vpd,
        NULL,
};
@ -336,7 +336,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
}

const struct attribute_group pci_dev_vpd_attr_group = {
        .bin_attrs = vpd_attrs,
        .bin_attrs_new = vpd_attrs,
        .is_bin_visible = vpd_attr_is_visible,
};

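The group's switch from .bin_attrs to .bin_attrs_new is what allows the fully const attribute array. A condensed sketch of the same pattern for a hypothetical read-only attribute named "demo":

#include <linux/sysfs.h>

/* Hypothetical sysfs blob following the constified pattern above. */
static ssize_t demo_read(struct file *filp, struct kobject *kobj,
                         const struct bin_attribute *bin_attr, char *buf,
                         loff_t off, size_t count)
{
        return 0;       /* would copy up to @count bytes at @off into @buf */
}

static const BIN_ATTR(demo, 0444, demo_read, NULL, 0);

static const struct bin_attribute *const demo_bin_attrs[] = {
        &bin_attr_demo,
        NULL,
};

static const struct attribute_group demo_attr_group = {
        .bin_attrs_new = demo_bin_attrs,        /* const-friendly field */
};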
@ -498,7 +498,7 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
                if (vfio_pci_nointx(pdev)) {
                        pci_info(pdev, "Masking broken INTx support\n");
                        vdev->nointx = true;
                        pci_intx(pdev, 0);
                        pci_intx_unmanaged(pdev, 0);
                } else
                        vdev->pci_2_3 = pci_intx_mask_supported(pdev);
        }

@ -118,7 +118,7 @@ static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
         */
        if (unlikely(!is_intx(vdev))) {
                if (vdev->pci_2_3)
                        pci_intx(pdev, 0);
                        pci_intx_unmanaged(pdev, 0);
                goto out_unlock;
        }

@ -132,7 +132,7 @@ static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
                 * mask, not just when something is pending.
                 */
                if (vdev->pci_2_3)
                        pci_intx(pdev, 0);
                        pci_intx_unmanaged(pdev, 0);
                else
                        disable_irq_nosync(pdev->irq);

@ -178,7 +178,7 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *data)
         */
        if (unlikely(!is_intx(vdev))) {
                if (vdev->pci_2_3)
                        pci_intx(pdev, 1);
                        pci_intx_unmanaged(pdev, 1);
                goto out_unlock;
        }

@ -296,7 +296,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
         */
        ctx->masked = vdev->virq_disabled;
        if (vdev->pci_2_3) {
                pci_intx(pdev, !ctx->masked);
                pci_intx_unmanaged(pdev, !ctx->masked);
                irqflags = IRQF_SHARED;
        } else {
                irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
@ -569,7 +569,7 @@ static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
         * via their shutdown paths. Restore for NoINTx devices.
         */
        if (vdev->nointx)
                pci_intx(pdev, 0);
                pci_intx_unmanaged(pdev, 0);

        vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

@ -106,7 +106,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)

        if (dev_data && dev_data->allow_interrupt_control &&
            ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
                pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));
                pci_intx_unmanaged(dev, !(value & PCI_COMMAND_INTX_DISABLE));

        cmd->val = value;

@ -157,7 +157,7 @@ struct pci_epf {
        struct device dev;
        const char *name;
        struct pci_epf_header *header;
        struct pci_epf_bar bar[6];
        struct pci_epf_bar bar[PCI_STD_NUM_BARS];
        u8 msi_interrupts;
        u16 msix_interrupts;
        u8 func_no;
@ -174,7 +174,7 @@ struct pci_epf {
        /* Below members are to attach secondary EPC to an endpoint function */
        struct pci_epc *sec_epc;
        struct list_head sec_epc_list;
        struct pci_epf_bar sec_epc_bar[6];
        struct pci_epf_bar sec_epc_bar[PCI_STD_NUM_BARS];
        u8 sec_epc_func_no;
        struct config_group *group;
        unsigned int is_bound;
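PCI_STD_NUM_BARS is the number of standard type-0 BARs (six), so the array sizes are unchanged; the rename only replaces the magic number. A small sketch of iterating by the named constant; demo_dump_bars is hypothetical:

#include <linux/pci-epf.h>
#include <linux/pci_regs.h>     /* PCI_STD_NUM_BARS */

/* Hypothetical walk over an endpoint function's BARs, sized by the
 * named constant rather than a literal 6. */
static void demo_dump_bars(struct pci_epf *epf)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++)
                dev_dbg(&epf->dev, "BAR%d size %zu\n", i, epf->bar[i].size);
}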
@ -1350,6 +1350,7 @@ int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_disable_parity(struct pci_dev *dev);
void pci_intx_unmanaged(struct pci_dev *pdev, int enable);
void pci_intx(struct pci_dev *dev, int enable);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
@ -2297,6 +2298,7 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
                                    struct pci_dev *dev) { }
#endif

int pcim_intx(struct pci_dev *pdev, int enabled);
int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
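These are the managed counterparts that the deprecated hybrid pci_request_*() paths point to. A hedged probe sketch using them; the name "demo" and the function itself are illustrative:

#include <linux/pci.h>

/* Hypothetical managed probe: everything requested here is released
 * automatically by devres when the device is unbound. */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem *regs;
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_request_all_regions(pdev, "demo");
        if (rc)
                return rc;

        regs = pcim_iomap(pdev, 0, 0);  /* BAR 0, map the whole region */
        if (!regs)
                return -ENOMEM;

        return 0;
}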
@ -533,7 +533,7 @@
#define  PCI_EXP_DEVSTA_TRPND   0x0020  /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1       12      /* v1 endpoints without link end here */
#define PCI_EXP_LNKCAP          0x0c    /* Link Capabilities */
#define  PCI_EXP_LNKCAP_SLS     0x0000000f /* Supported Link Speeds */
#define  PCI_EXP_LNKCAP_SLS     0x0000000f /* Max Link Speed (prior to PCIe r3.0: Supported Link Speeds) */
#define  PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define  PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define  PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
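The updated comment records that since PCIe r3.0 this field encodes the Max Link Speed as an index into the LNKCAP2 Supported Link Speeds Vector, not a bitmask. A sketch of decoding it for the three speeds defined here; the helper name is hypothetical:

#include <linux/pci.h>
#include <uapi/linux/pci_regs.h>

/* Hypothetical helper: report the device's maximum link speed in
 * tenths of GT/s. SLS is an index, so 1 means 2.5 GT/s, and so on. */
static int demo_max_link_speed(struct pci_dev *pdev)
{
        u32 lnkcap;

        pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &lnkcap);

        switch (lnkcap & PCI_EXP_LNKCAP_SLS) {
        case PCI_EXP_LNKCAP_SLS_2_5GB: return 25;
        case PCI_EXP_LNKCAP_SLS_5_0GB: return 50;
        case PCI_EXP_LNKCAP_SLS_8_0GB: return 80;
        default: return -EINVAL;        /* faster or unknown speed */
        }
}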
@ -20,6 +20,7 @@
#define PCITEST_MSIX            _IOW('P', 0x7, int)
#define PCITEST_SET_IRQTYPE     _IOW('P', 0x8, int)
#define PCITEST_GET_IRQTYPE     _IO('P', 0x9)
#define PCITEST_BARS            _IO('P', 0xa)
#define PCITEST_CLEAR_IRQ       _IO('P', 0x10)

#define PCITEST_FLAGS_USE_DMA   0x00000001

@ -22,6 +22,7 @@ static char *irq[] = { "LEGACY", "MSI", "MSI-X" };
struct pci_test {
        char            *device;
        char            barnum;
        bool            consecutive_bar_test;
        bool            legacyirq;
        unsigned int    msinum;
        unsigned int    msixnum;
@ -57,6 +58,15 @@ static int run_test(struct pci_test *test)
                        fprintf(stdout, "%s\n", result[ret]);
        }

        if (test->consecutive_bar_test) {
                ret = ioctl(fd, PCITEST_BARS);
                fprintf(stdout, "Consecutive BAR test:\t\t");
                if (ret < 0)
                        fprintf(stdout, "TEST FAILED\n");
                else
                        fprintf(stdout, "%s\n", result[ret]);
        }

        if (test->set_irqtype) {
                ret = ioctl(fd, PCITEST_SET_IRQTYPE, test->irqtype);
                fprintf(stdout, "SET IRQ TYPE TO %s:\t\t", irq[test->irqtype]);
@ -172,7 +182,7 @@ int main(int argc, char **argv)
        /* set default endpoint device */
        test->device = "/dev/pci-endpoint-test.0";

        while ((c = getopt(argc, argv, "D:b:m:x:i:deIlhrwcs:")) != EOF)
        while ((c = getopt(argc, argv, "D:b:Cm:x:i:deIlhrwcs:")) != EOF)
                switch (c) {
                case 'D':
                        test->device = optarg;
@ -182,6 +192,9 @@ int main(int argc, char **argv)
                        if (test->barnum < 0 || test->barnum > 5)
                                goto usage;
                        continue;
                case 'C':
                        test->consecutive_bar_test = true;
                        continue;
                case 'l':
                        test->legacyirq = true;
                        continue;
@ -230,6 +243,7 @@ int main(int argc, char **argv)
                "Options:\n"
                "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n"
                "\t-b <bar num> BAR test (bar number between 0..5)\n"
                "\t-C Consecutive BAR test\n"
                "\t-m <msi num> MSI test (msi number between 1..32)\n"
                "\t-x <msix num> \tMSI-X test (msix number between 1..2048)\n"
                "\t-i <irq type> \tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n"
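The new -C option boils down to the PCITEST_BARS ioctl added above. A standalone user-space sketch that issues it directly, assuming the default test node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/pcitest.h>      /* PCITEST_BARS, per the header hunk above */

int main(void)
{
        int fd, ret;

        fd = open("/dev/pci-endpoint-test.0", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        ret = ioctl(fd, PCITEST_BARS);  /* 1 = okay, 0 = not okay, <0 = error */
        printf("Consecutive BAR test: %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

        close(fd);
        return 0;
}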
@ -11,6 +11,7 @@ do
        pcitest -b $bar
        bar=`expr $bar + 1`
done
pcitest -C
echo

echo "Interrupt tests"