Diffstat (limited to 'drivers/pci/controller')
43 files changed, 4963 insertions, 937 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 41748d083b93..c254d2b8bf17 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -146,7 +146,7 @@ config PCIE_HISI_ERR
 config PCI_IXP4XX
 	bool "Intel IXP4xx PCI controller"
-	depends on ARM && OF
+	depends on OF
 	depends on ARCH_IXP4XX || COMPILE_TEST
 	default ARCH_IXP4XX
 	help
@@ -259,12 +259,20 @@ config PCIE_RCAR_EP
 config PCI_RCAR_GEN2
 	bool "Renesas R-Car Gen2 Internal PCI controller"
-	depends on ARCH_RENESAS || COMPILE_TEST
-	depends on ARM
+	depends on (ARCH_RENESAS && ARM) || COMPILE_TEST
 	help
 	  Say Y here if you want internal PCI support on R-Car Gen2 SoC.
-	  There are 3 internal PCI controllers available with a single
-	  built-in EHCI/OHCI host controller present on each one.
+	  Each internal PCI controller contains a single built-in EHCI/OHCI
+	  host controller.
+
+config PCIE_RENESAS_RZG3S_HOST
+	bool "Renesas RZ/G3S PCIe host controller"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	select MFD_SYSCON
+	select IRQ_MSI_LIB
+	help
+	  Say Y here if you want PCIe host controller support on Renesas RZ/G3S
+	  SoC.

 config PCIE_ROCKCHIP
 	bool
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 038ccbd9e3ba..229929a945c2 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
 obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
 obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o
 obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o
+obj-$(CONFIG_PCIE_RENESAS_RZG3S_HOST) += pcie-rzg3s-host.o
 obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
 obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
 obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 02a639e55fd8..9e651d545973 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -19,10 +19,10 @@ config PCIE_CADENCE_EP
 	select PCIE_CADENCE

 config PCIE_CADENCE_PLAT
-	bool
+	tristate

 config PCIE_CADENCE_PLAT_HOST
-	bool "Cadence platform PCIe controller (host mode)"
+	tristate "Cadence platform PCIe controller (host mode)"
 	depends on OF
 	select PCIE_CADENCE_HOST
 	select PCIE_CADENCE_PLAT
@@ -32,7 +32,7 @@ config PCIE_CADENCE_PLAT_HOST
 	  vendors SoCs.

 config PCIE_CADENCE_PLAT_EP
-	bool "Cadence platform PCIe controller (endpoint mode)"
+	tristate "Cadence platform PCIe controller (endpoint mode)"
 	depends on OF
 	depends on PCI_ENDPOINT
 	select PCIE_CADENCE_EP
@@ -42,6 +42,21 @@ config PCIE_CADENCE_PLAT_EP
 	  endpoint mode. This PCIe controller may be embedded into many
 	  different vendors SoCs.

+config PCI_SKY1_HOST
+	tristate "CIX SKY1 PCIe controller (host mode)"
+	depends on OF && (ARCH_CIX || COMPILE_TEST)
+	select PCIE_CADENCE_HOST
+	select PCI_ECAM
+	help
+	  Say Y here if you want to support the CIX SKY1 PCIe platform
+	  controller in host mode. The CIX SKY1 PCIe controller uses the
+	  Cadence HPA (High Performance Architecture) IP, the second
+	  generation of the Cadence PCIe IP.
+
+	  This driver requires the Cadence PCIe core infrastructure
+	  (PCIE_CADENCE_HOST) and a hardware platform adaptation layer
+	  to function.
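The adaptation layer mentioned in the PCI_SKY1_HOST help text above is thin: a glue driver fills in the cdns_pcie_ops link-control callbacks and hands a populated cdns_pcie_rc to cdns_pcie_hpa_host_setup(), both of which appear later in this patch. A minimal skeleton under that assumption; the example_* names are illustrative placeholders and the SoC-specific register accesses are elided:

/* Sketch of a Cadence HPA glue driver; assumes this series' pcie-cadence.h. */
static int example_start_link(struct cdns_pcie *pcie)
{
	/* SoC-specific: set a strap/control bit that enables link training */
	return 0;
}

static void example_stop_link(struct cdns_pcie *pcie)
{
	/* SoC-specific: clear the link-training enable bit */
}

static bool example_link_up(struct cdns_pcie *pcie)
{
	/* SoC-specific: report link status from a debug/status register */
	return true;
}

static const struct cdns_pcie_ops example_ops = {
	.start_link	= example_start_link,
	.stop_link	= example_stop_link,
	.link_up	= example_link_up,
};

cdns_pcie_hpa_host_setup() then performs PTM enablement, link bring-up through these callbacks, root-port and address-translation initialization, and finally calls pci_host_probe().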
+
 config PCIE_SG2042_HOST
 	tristate "Sophgo SG2042 PCIe controller (host mode)"
 	depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
index 5e23f8539ecc..b8ec1cecfaa8 100644
--- a/drivers/pci/controller/cadence/Makefile
+++ b/drivers/pci/controller/cadence/Makefile
@@ -1,7 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
-obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
-obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+pcie-cadence-mod-y := pcie-cadence-hpa.o pcie-cadence.o
+pcie-cadence-host-mod-y := pcie-cadence-host-common.o pcie-cadence-host.o pcie-cadence-host-hpa.o
+pcie-cadence-ep-mod-y := pcie-cadence-ep.o
+
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence-mod.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host-mod.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep-mod.o
 obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
 obj-$(CONFIG_PCI_J721E) += pci-j721e.o
 obj-$(CONFIG_PCIE_SG2042_HOST) += pcie-sg2042.o
+obj-$(CONFIG_PCI_SKY1_HOST) += pci-sky1.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 5bc5ab20aa6d..ecd1b0312400 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -477,9 +477,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 	struct j721e_pcie *pcie;
 	struct cdns_pcie_rc *rc = NULL;
 	struct cdns_pcie_ep *ep = NULL;
-	struct gpio_desc *gpiod;
 	void __iomem *base;
-	struct clk *clk;
 	u32 num_lanes;
 	u32 mode;
 	int ret;
@@ -590,12 +588,12 @@ static int j721e_pcie_probe(struct platform_device *pdev)

 	switch (mode) {
 	case PCI_MODE_RC:
-		gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
-		if (IS_ERR(gpiod)) {
-			ret = dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get reset GPIO\n");
+		pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+		if (IS_ERR(pcie->reset_gpio)) {
+			ret = dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
+					    "Failed to get reset GPIO\n");
 			goto err_get_sync;
 		}
-		pcie->reset_gpio = gpiod;

 		ret = cdns_pcie_init_phy(dev, cdns_pcie);
 		if (ret) {
@@ -603,19 +601,13 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 			goto err_get_sync;
 		}

-		clk = devm_clk_get_optional(dev, "pcie_refclk");
-		if (IS_ERR(clk)) {
-			ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n");
+		pcie->refclk = devm_clk_get_optional_enabled(dev, "pcie_refclk");
+		if (IS_ERR(pcie->refclk)) {
+			ret = dev_err_probe(dev, PTR_ERR(pcie->refclk),
+					    "failed to enable pcie_refclk\n");
 			goto err_pcie_setup;
 		}

-		ret = clk_prepare_enable(clk);
-		if (ret) {
-			dev_err_probe(dev, ret, "failed to enable pcie_refclk\n");
-			goto err_pcie_setup;
-		}
-		pcie->refclk = clk;
-
 		/*
 		 * Section 2.2 of the PCI Express Card Electromechanical
 		 * Specification (Revision 5.1) mandates that the deassertion
 		 * of the PERST# signal should be delayed by 100 ms (TPVPERL).
 		 * This shall ensure that the power and the reference clock
 		 * are stable.
*/ - if (gpiod) { + if (pcie->reset_gpio) { msleep(PCIE_T_PVPERL_MS); - gpiod_set_value_cansleep(gpiod, 1); + gpiod_set_value_cansleep(pcie->reset_gpio, 1); } ret = cdns_pcie_host_setup(rc); - if (ret < 0) { - clk_disable_unprepare(pcie->refclk); + if (ret < 0) goto err_pcie_setup; - } break; case PCI_MODE_EP: @@ -679,7 +669,6 @@ static void j721e_pcie_remove(struct platform_device *pdev) gpiod_set_value_cansleep(pcie->reset_gpio, 0); - clk_disable_unprepare(pcie->refclk); cdns_pcie_disable_phy(cdns_pcie); j721e_pcie_disable_link_irq(pcie); pm_runtime_put(dev); diff --git a/drivers/pci/controller/cadence/pci-sky1.c b/drivers/pci/controller/cadence/pci-sky1.c new file mode 100644 index 000000000000..d8c216dc120d --- /dev/null +++ b/drivers/pci/controller/cadence/pci-sky1.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe controller driver for CIX's sky1 SoCs + * + * Copyright 2025 Cix Technology Group Co., Ltd. + * Author: Hans Zhang <hans.zhang@cixtech.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/pci-ecam.h> +#include <linux/pci_ids.h> + +#include "pcie-cadence.h" +#include "pcie-cadence-host-common.h" + +#define PCI_VENDOR_ID_CIX 0x1f6c +#define PCI_DEVICE_ID_CIX_SKY1 0x0001 + +#define STRAP_REG(n) ((n) * 0x04) +#define STATUS_REG(n) ((n) * 0x04) +#define LINK_TRAINING_ENABLE BIT(0) +#define LINK_COMPLETE BIT(0) + +#define SKY1_IP_REG_BANK 0x1000 +#define SKY1_IP_CFG_CTRL_REG_BANK 0x4c00 +#define SKY1_IP_AXI_MASTER_COMMON 0xf000 +#define SKY1_AXI_SLAVE 0x9000 +#define SKY1_AXI_MASTER 0xb000 +#define SKY1_AXI_HLS_REGISTERS 0xc000 +#define SKY1_AXI_RAS_REGISTERS 0xe000 +#define SKY1_DTI_REGISTERS 0xd000 + +#define IP_REG_I_DBG_STS_0 0x420 + +struct sky1_pcie { + struct cdns_pcie *cdns_pcie; + struct cdns_pcie_rc *cdns_pcie_rc; + + struct resource *cfg_res; + struct resource *msg_res; + struct pci_config_window *cfg; + void __iomem *strap_base; + void __iomem *status_base; + void __iomem *reg_base; + void __iomem *cfg_base; + void __iomem *msg_base; +}; + +static int sky1_pcie_resource_get(struct platform_device *pdev, + struct sky1_pcie *pcie) +{ + struct device *dev = &pdev->dev; + struct resource *res; + void __iomem *base; + + base = devm_platform_ioremap_resource_byname(pdev, "reg"); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), + "unable to find \"reg\" registers\n"); + pcie->reg_base = base; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + if (!res) + return dev_err_probe(dev, -ENODEV, "unable to get \"cfg\" resource\n"); + pcie->cfg_res = res; + + base = devm_platform_ioremap_resource_byname(pdev, "rcsu_strap"); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), + "unable to find \"rcsu_strap\" registers\n"); + pcie->strap_base = base; + + base = devm_platform_ioremap_resource_byname(pdev, "rcsu_status"); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), + "unable to find \"rcsu_status\" registers\n"); + pcie->status_base = base; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "msg"); + if (!res) + return dev_err_probe(dev, -ENODEV, "unable to get \"msg\" resource\n"); + pcie->msg_res = res; + pcie->msg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->msg_base)) { + return dev_err_probe(dev, PTR_ERR(pcie->msg_base), + "unable to ioremap msg resource\n"); + } + + return 0; +} + +static int sky1_pcie_start_link(struct cdns_pcie *cdns_pcie) +{ + struct sky1_pcie *pcie = 
dev_get_drvdata(cdns_pcie->dev); + u32 val; + + val = readl(pcie->strap_base + STRAP_REG(1)); + val |= LINK_TRAINING_ENABLE; + writel(val, pcie->strap_base + STRAP_REG(1)); + + return 0; +} + +static void sky1_pcie_stop_link(struct cdns_pcie *cdns_pcie) +{ + struct sky1_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 val; + + val = readl(pcie->strap_base + STRAP_REG(1)); + val &= ~LINK_TRAINING_ENABLE; + writel(val, pcie->strap_base + STRAP_REG(1)); +} + +static bool sky1_pcie_link_up(struct cdns_pcie *cdns_pcie) +{ + u32 val; + + val = cdns_pcie_hpa_readl(cdns_pcie, REG_BANK_IP_REG, + IP_REG_I_DBG_STS_0); + return val & LINK_COMPLETE; +} + +static const struct cdns_pcie_ops sky1_pcie_ops = { + .start_link = sky1_pcie_start_link, + .stop_link = sky1_pcie_stop_link, + .link_up = sky1_pcie_link_up, +}; + +static int sky1_pcie_probe(struct platform_device *pdev) +{ + struct cdns_plat_pcie_of_data *reg_off; + struct device *dev = &pdev->dev; + struct pci_host_bridge *bridge; + struct cdns_pcie *cdns_pcie; + struct resource_entry *bus; + struct cdns_pcie_rc *rc; + struct sky1_pcie *pcie; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); + if (!bridge) + return -ENOMEM; + + ret = sky1_pcie_resource_get(pdev, pcie); + if (ret < 0) + return ret; + + bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (!bus) + return -ENODEV; + + pcie->cfg = pci_ecam_create(dev, pcie->cfg_res, bus->res, + &pci_generic_ecam_ops); + if (IS_ERR(pcie->cfg)) + return PTR_ERR(pcie->cfg); + + bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops; + rc = pci_host_bridge_priv(bridge); + rc->ecam_supported = 1; + rc->cfg_base = pcie->cfg->win; + rc->cfg_res = &pcie->cfg->res; + + cdns_pcie = &rc->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &sky1_pcie_ops; + cdns_pcie->reg_base = pcie->reg_base; + cdns_pcie->msg_res = pcie->msg_res; + cdns_pcie->is_rc = 1; + + reg_off = devm_kzalloc(dev, sizeof(*reg_off), GFP_KERNEL); + if (!reg_off) + return -ENOMEM; + + reg_off->ip_reg_bank_offset = SKY1_IP_REG_BANK; + reg_off->ip_cfg_ctrl_reg_offset = SKY1_IP_CFG_CTRL_REG_BANK; + reg_off->axi_mstr_common_offset = SKY1_IP_AXI_MASTER_COMMON; + reg_off->axi_slave_offset = SKY1_AXI_SLAVE; + reg_off->axi_master_offset = SKY1_AXI_MASTER; + reg_off->axi_hls_offset = SKY1_AXI_HLS_REGISTERS; + reg_off->axi_ras_offset = SKY1_AXI_RAS_REGISTERS; + reg_off->axi_dti_offset = SKY1_DTI_REGISTERS; + cdns_pcie->cdns_pcie_reg_offsets = reg_off; + + pcie->cdns_pcie = cdns_pcie; + pcie->cdns_pcie_rc = rc; + pcie->cfg_base = rc->cfg_base; + bridge->sysdata = pcie->cfg; + + rc->vendor_id = PCI_VENDOR_ID_CIX; + rc->device_id = PCI_DEVICE_ID_CIX_SKY1; + rc->no_inbound_map = 1; + + dev_set_drvdata(dev, pcie); + + ret = cdns_pcie_hpa_host_setup(rc); + if (ret < 0) { + pci_ecam_free(pcie->cfg); + return ret; + } + + return 0; +} + +static const struct of_device_id of_sky1_pcie_match[] = { + { .compatible = "cix,sky1-pcie-host", }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_sky1_pcie_match); + +static void sky1_pcie_remove(struct platform_device *pdev) +{ + struct sky1_pcie *pcie = platform_get_drvdata(pdev); + + pci_ecam_free(pcie->cfg); +} + +static struct platform_driver sky1_pcie_driver = { + .probe = sky1_pcie_probe, + .remove = sky1_pcie_remove, + .driver = { + .name = "sky1-pcie", + .of_match_table = of_sky1_pcie_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(sky1_pcie_driver); + 
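The probe above sets rc->ecam_supported and routes configuration accesses through pci_generic_ecam_ops, rather than through the outbound-region-0 remapping used by the non-ECAM path (see cdns_pci_hpa_map_bus() further down). For reference, a sketch of the generic ECAM address computation this relies on; ecam_addr() is an illustrative helper, not an API from this series:

/* Sketch: generic ECAM addressing, 1 MiB per bus and 4 KiB per function. */
static void __iomem *ecam_addr(void __iomem *win, unsigned int busn_start,
			       unsigned int busn, unsigned int devfn, int where)
{
	/* bus in bits [27:20], device [19:15], function [14:12], register [11:0] */
	return win + (((busn - busn_start) << 20) | (devfn << 12) | where);
}

pci_ecam_create() maps the "cfg" window once at probe time, so each config read/write is a plain MMIO access at the computed offset, with no per-access register programming.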
+MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("PCIe controller driver for CIX's sky1 SoCs"); +MODULE_AUTHOR("Hans Zhang <hans.zhang@cixtech.com>"); diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-common.c b/drivers/pci/controller/cadence/pcie-cadence-host-common.c new file mode 100644 index 000000000000..15415d7f35ee --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-host-common.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence PCIe host controller library. + * + * Copyright (c) 2017 Cadence + * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> + */ +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/list_sort.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/platform_device.h> + +#include "pcie-cadence.h" +#include "pcie-cadence-host-common.h" + +#define LINK_RETRAIN_TIMEOUT HZ + +u64 bar_max_size[] = { + [RP_BAR0] = _ULL(128 * SZ_2G), + [RP_BAR1] = SZ_2G, + [RP_NO_BAR] = _BITULL(63), +}; +EXPORT_SYMBOL_GPL(bar_max_size); + +int cdns_pcie_host_training_complete(struct cdns_pcie *pcie) +{ + u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET; + unsigned long end_jiffies; + u16 lnk_stat; + + /* Wait for link training to complete. Exit after timeout. */ + end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; + do { + lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA); + if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) + break; + usleep_range(0, 1000); + } while (time_before(jiffies, end_jiffies)); + + if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) + return 0; + + return -ETIMEDOUT; +} +EXPORT_SYMBOL_GPL(cdns_pcie_host_training_complete); + +int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie, + cdns_pcie_linkup_func pcie_link_up) +{ + struct device *dev = pcie->dev; + int retries; + + /* Check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (pcie_link_up(pcie)) { + dev_info(dev, "Link up\n"); + return 0; + } + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + return -ETIMEDOUT; +} +EXPORT_SYMBOL_GPL(cdns_pcie_host_wait_for_link); + +int cdns_pcie_retrain(struct cdns_pcie *pcie, + cdns_pcie_linkup_func pcie_link_up) +{ + u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET; + u16 lnk_stat, lnk_ctl; + int ret = 0; + + /* + * Set retrain bit if current speed is 2.5 GB/s, + * but the PCIe root port support is > 2.5 GB/s. + */ + + lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off + + PCI_EXP_LNKCAP)); + if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) + return ret; + + lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA); + if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { + lnk_ctl = cdns_pcie_rp_readw(pcie, + pcie_cap_off + PCI_EXP_LNKCTL); + lnk_ctl |= PCI_EXP_LNKCTL_RL; + cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL, + lnk_ctl); + + ret = cdns_pcie_host_training_complete(pcie); + if (ret) + return ret; + + ret = cdns_pcie_host_wait_for_link(pcie, pcie_link_up); + } + return ret; +} +EXPORT_SYMBOL_GPL(cdns_pcie_retrain); + +int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc, + cdns_pcie_linkup_func pcie_link_up) +{ + struct cdns_pcie *pcie = &rc->pcie; + int ret; + + ret = cdns_pcie_host_wait_for_link(pcie, pcie_link_up); + + /* + * Retrain link for Gen2 training defect + * if quirk flag is set. 
+ */ + if (!ret && rc->quirk_retrain_flag) + ret = cdns_pcie_retrain(pcie, pcie_link_up); + + return ret; +} +EXPORT_SYMBOL_GPL(cdns_pcie_host_start_link); + +enum cdns_pcie_rp_bar +cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size) +{ + enum cdns_pcie_rp_bar bar, sel_bar; + + sel_bar = RP_BAR_UNDEFINED; + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (!rc->avail_ib_bar[bar]) + continue; + + if (size <= bar_max_size[bar]) { + if (sel_bar == RP_BAR_UNDEFINED) { + sel_bar = bar; + continue; + } + + if (bar_max_size[bar] < bar_max_size[sel_bar]) + sel_bar = bar; + } + } + + return sel_bar; +} +EXPORT_SYMBOL_GPL(cdns_pcie_host_find_min_bar); + +enum cdns_pcie_rp_bar +cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size) +{ + enum cdns_pcie_rp_bar bar, sel_bar; + + sel_bar = RP_BAR_UNDEFINED; + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (!rc->avail_ib_bar[bar]) + continue; + + if (size >= bar_max_size[bar]) { + if (sel_bar == RP_BAR_UNDEFINED) { + sel_bar = bar; + continue; + } + + if (bar_max_size[bar] > bar_max_size[sel_bar]) + sel_bar = bar; + } + } + + return sel_bar; +} +EXPORT_SYMBOL_GPL(cdns_pcie_host_find_max_bar); + +int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a, + const struct list_head *b) +{ + struct resource_entry *entry1, *entry2; + + entry1 = container_of(a, struct resource_entry, node); + entry2 = container_of(b, struct resource_entry, node); + + return resource_size(entry2->res) - resource_size(entry1->res); +} +EXPORT_SYMBOL_GPL(cdns_pcie_host_dma_ranges_cmp); + +int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc, + struct resource_entry *entry, + cdns_pcie_host_bar_ib_cfg pci_host_ib_config) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct device *dev = pcie->dev; + u64 cpu_addr, size, winsize; + enum cdns_pcie_rp_bar bar; + unsigned long flags; + int ret; + + cpu_addr = entry->res->start; + flags = entry->res->flags; + size = resource_size(entry->res); + + while (size > 0) { + /* + * Try to find a minimum BAR whose size is greater than + * or equal to the remaining resource_entry size. This will + * fail if the size of each of the available BARs is less than + * the remaining resource_entry size. + * + * If a minimum BAR is found, IB ATU will be configured and + * exited. + */ + bar = cdns_pcie_host_find_min_bar(rc, size); + if (bar != RP_BAR_UNDEFINED) { + ret = pci_host_ib_config(rc, bar, cpu_addr, size, flags); + if (ret) + dev_err(dev, "IB BAR: %d config failed\n", bar); + return ret; + } + + /* + * If the control reaches here, it would mean the remaining + * resource_entry size cannot be fitted in a single BAR. So we + * find a maximum BAR whose size is less than or equal to the + * remaining resource_entry size and split the resource entry + * so that part of resource entry is fitted inside the maximum + * BAR. The remaining size would be fitted during the next + * iteration of the loop. + * + * If a maximum BAR is not found, there is no way we can fit + * this resource_entry, so we error out. 
+ */ + bar = cdns_pcie_host_find_max_bar(rc, size); + if (bar == RP_BAR_UNDEFINED) { + dev_err(dev, "No free BAR to map cpu_addr %llx\n", + cpu_addr); + return -EINVAL; + } + + winsize = bar_max_size[bar]; + ret = pci_host_ib_config(rc, bar, cpu_addr, winsize, flags); + if (ret) { + dev_err(dev, "IB BAR: %d config failed\n", bar); + return ret; + } + + size -= winsize; + cpu_addr += winsize; + } + + return 0; +} + +int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc, + cdns_pcie_host_bar_ib_cfg pci_host_ib_config) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; + struct pci_host_bridge *bridge; + struct resource_entry *entry; + u32 no_bar_nbits = 32; + int err; + + bridge = pci_host_bridge_from_priv(rc); + if (!bridge) + return -ENOMEM; + + if (list_empty(&bridge->dma_ranges)) { + of_property_read_u32(np, "cdns,no-bar-match-nbits", + &no_bar_nbits); + err = pci_host_ib_config(rc, RP_NO_BAR, 0x0, (u64)1 << no_bar_nbits, 0); + if (err) + dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR); + return err; + } + + list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp); + + resource_list_for_each_entry(entry, &bridge->dma_ranges) { + err = cdns_pcie_host_bar_config(rc, entry, pci_host_ib_config); + if (err) { + dev_err(dev, "Fail to configure IB using dma-ranges\n"); + return err; + } + } + + return 0; +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence PCIe host controller driver"); diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-common.h b/drivers/pci/controller/cadence/pcie-cadence-host-common.h new file mode 100644 index 000000000000..fe7d4202a8b6 --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-host-common.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence PCIe Host controller driver. 
+ * + * Copyright (c) 2017 Cadence + * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> + */ +#ifndef _PCIE_CADENCE_HOST_COMMON_H +#define _PCIE_CADENCE_HOST_COMMON_H + +#include <linux/kernel.h> +#include <linux/pci.h> + +extern u64 bar_max_size[]; + +typedef int (*cdns_pcie_host_bar_ib_cfg)(struct cdns_pcie_rc *, + enum cdns_pcie_rp_bar, + u64, + u64, + unsigned long); +typedef bool (*cdns_pcie_linkup_func)(struct cdns_pcie *); + +int cdns_pcie_host_training_complete(struct cdns_pcie *pcie); +int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie, + cdns_pcie_linkup_func pcie_link_up); +int cdns_pcie_retrain(struct cdns_pcie *pcie, cdns_pcie_linkup_func pcie_linkup_func); +int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc, + cdns_pcie_linkup_func pcie_link_up); +enum cdns_pcie_rp_bar +cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size); +enum cdns_pcie_rp_bar +cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size); +int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a, + const struct list_head *b); +int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc, + enum cdns_pcie_rp_bar bar, + u64 cpu_addr, + u64 size, + unsigned long flags); +int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc, + struct resource_entry *entry, + cdns_pcie_host_bar_ib_cfg pci_host_ib_config); +int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc, + cdns_pcie_host_bar_ib_cfg pci_host_ib_config); + +#endif /* _PCIE_CADENCE_HOST_COMMON_H */ diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c b/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c new file mode 100644 index 000000000000..0f540bed58e8 --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence PCIe host controller driver. + * + * Copyright (c) 2024, Cadence Design Systems + * Author: Manikandan K Pillai <mpillai@cadence.com> + */ +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/list_sort.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> + +#include "pcie-cadence.h" +#include "pcie-cadence-host-common.h" + +static u8 bar_aperture_mask[] = { + [RP_BAR0] = 0x3F, + [RP_BAR1] = 0x3F, +}; + +void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct pci_host_bridge *bridge = pci_find_host_bridge(bus); + struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); + struct cdns_pcie *pcie = &rc->pcie; + unsigned int busn = bus->number; + u32 addr0, desc0, desc1, ctrl0; + u32 regval; + + if (pci_is_root_bus(bus)) { + /* + * Only the root port (devfn == 0) is connected to this bus. + * All other PCI devices are behind some bridge hence on another + * bus. 
+ */ + if (devfn) + return NULL; + + return pcie->reg_base + (where & 0xfff); + } + + /* Clear AXI link-down status */ + regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN, + (regval & ~GENMASK(0, 0))); + + /* Update Output registers for AXI region 0 */ + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) | + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) | + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(busn); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(0), addr0); + + desc1 = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0)); + desc1 &= ~CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK; + desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0); + ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS | + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN; + + if (busn == bridge->busnr + 1) + desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; + else + desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1; + + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), desc0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), ctrl0); + + return rc->cfg_base + (where & 0xfff); +} + +static struct pci_ops cdns_pcie_hpa_host_ops = { + .map_bus = cdns_pci_hpa_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +static void cdns_pcie_hpa_host_enable_ptm_response(struct cdns_pcie *pcie) +{ + u32 val; + + val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL); + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL, + val | CDNS_PCIE_HPA_LM_PTM_CTRL_PTMRSEN); +} + +static int cdns_pcie_hpa_host_bar_ib_config(struct cdns_pcie_rc *rc, + enum cdns_pcie_rp_bar bar, + u64 cpu_addr, u64 size, + unsigned long flags) +{ + struct cdns_pcie *pcie = &rc->pcie; + u32 addr0, addr1, aperture, value; + + if (!rc->avail_ib_bar[bar]) + return -ENODEV; + + rc->avail_ib_bar[bar] = false; + + aperture = ilog2(size); + if (bar == RP_NO_BAR) { + addr0 = CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(aperture) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + } else { + addr0 = lower_32_bits(cpu_addr); + addr1 = upper_32_bits(cpu_addr); + } + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER, + CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar), addr0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER, + CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar), addr1); + + if (bar == RP_NO_BAR) + bar = (enum cdns_pcie_rp_bar)BAR_0; + + value = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG); + value &= ~(HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) | + HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) | + HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) | + HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) | + HPA_LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 7)); + if (size + cpu_addr >= SZ_4G) { + value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar); + if ((flags & IORESOURCE_PREFETCH)) + value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar); + } else { + value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar); + if ((flags & IORESOURCE_PREFETCH)) + value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar); + } + + value |= HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture); + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, 
CDNS_PCIE_HPA_LM_RC_BAR_CFG, value); + + return 0; +} + +static int cdns_pcie_hpa_host_init_root_port(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + u32 value, ctrl; + + /* + * Set the root port BAR configuration register: + * - disable both BAR0 and BAR1 + * - enable Prefetchable Memory Base and Limit registers in type 1 + * config space (64 bits) + * - enable IO Base and Limit registers in type 1 config + * space (32 bits) + */ + + ctrl = CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED; + value = CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) | + CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) | + CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE | + CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS | + CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE | + CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS; + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, + CDNS_PCIE_HPA_LM_RC_BAR_CFG, value); + + if (rc->vendor_id != 0xffff) + cdns_pcie_hpa_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + + if (rc->device_id != 0xffff) + cdns_pcie_hpa_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); + + cdns_pcie_hpa_rp_writeb(pcie, PCI_CLASS_REVISION, 0); + cdns_pcie_hpa_rp_writeb(pcie, PCI_CLASS_PROG, 0); + cdns_pcie_hpa_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); + + /* Enable bus mastering */ + value = cdns_pcie_hpa_readl(pcie, REG_BANK_RP, PCI_COMMAND); + value |= (PCI_COMMAND_MEMORY | PCI_COMMAND_IO | PCI_COMMAND_MASTER); + cdns_pcie_hpa_writel(pcie, REG_BANK_RP, PCI_COMMAND, value); + return 0; +} + +static void cdns_pcie_hpa_create_region_for_cfg(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc); + struct resource *cfg_res = rc->cfg_res; + struct resource_entry *entry; + u64 cpu_addr = cfg_res->start; + u32 addr0, addr1, desc1; + int busnr = 0; + + entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (entry) + busnr = entry->res->start; + + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_TAG_MANAGEMENT, 0x01000000); + /* + * Reserve region 0 for PCI configure space accesses: + * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by + * cdns_pci_map_bus(), other region registers are set here once for all + */ + desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(0), 0x0); + /* Type-1 CFG */ + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), 0x05000000); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1); + + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(12) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(0), addr0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(0), addr1); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), 0x06000000); +} + +static int cdns_pcie_hpa_host_init_address_translation(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc); + struct resource_entry *entry; + int r = 0, busnr = 0; + + if (!rc->ecam_supported) + cdns_pcie_hpa_create_region_for_cfg(rc); + + entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (entry) + busnr = entry->res->start; + + r++; + if (pcie->msg_res) { + 
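+		/*
+		 * Region 0 was skipped above: it stays reserved for the
+		 * configuration-space window (programmed dynamically by
+		 * cdns_pci_hpa_map_bus() when ECAM is not supported). The
+		 * optional "msg" window gets its own outbound region for
+		 * transmitting normal message TLPs.
+		 */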
cdns_pcie_hpa_set_outbound_region_for_normal_msg(pcie, busnr, 0, r, + pcie->msg_res->start); + + r++; + } + resource_list_for_each_entry(entry, &bridge->windows) { + struct resource *res = entry->res; + u64 pci_addr = res->start - entry->offset; + + if (resource_type(res) == IORESOURCE_IO) + cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r, + true, + pci_pio_to_address(res->start), + pci_addr, + resource_size(res)); + else + cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r, + false, + res->start, + pci_addr, + resource_size(res)); + + r++; + } + + if (rc->no_inbound_map) + return 0; + else + return cdns_pcie_host_map_dma_ranges(rc, cdns_pcie_hpa_host_bar_ib_config); +} + +static int cdns_pcie_hpa_host_init(struct cdns_pcie_rc *rc) +{ + int err; + + err = cdns_pcie_hpa_host_init_root_port(rc); + if (err) + return err; + + return cdns_pcie_hpa_host_init_address_translation(rc); +} + +int cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct device *dev = rc->pcie.dev; + int ret; + + if (rc->quirk_detect_quiet_flag) + cdns_pcie_hpa_detect_quiet_min_delay_set(&rc->pcie); + + cdns_pcie_hpa_host_enable_ptm_response(pcie); + + ret = cdns_pcie_start_link(pcie); + if (ret) { + dev_err(dev, "Failed to start link\n"); + return ret; + } + + ret = cdns_pcie_host_wait_for_link(pcie, cdns_pcie_hpa_link_up); + if (ret) + dev_dbg(dev, "PCIe link never came up\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(cdns_pcie_hpa_host_link_setup); + +int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc) +{ + struct device *dev = rc->pcie.dev; + struct platform_device *pdev = to_platform_device(dev); + struct pci_host_bridge *bridge; + enum cdns_pcie_rp_bar bar; + struct cdns_pcie *pcie; + struct resource *res; + int ret; + + bridge = pci_host_bridge_from_priv(rc); + if (!bridge) + return -ENOMEM; + + pcie = &rc->pcie; + pcie->is_rc = true; + + if (!pcie->reg_base) { + pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); + if (IS_ERR(pcie->reg_base)) { + dev_err(dev, "missing \"reg\"\n"); + return PTR_ERR(pcie->reg_base); + } + } + + /* ECAM config space is remapped at glue layer */ + if (!rc->cfg_base) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(rc->cfg_base)) + return PTR_ERR(rc->cfg_base); + rc->cfg_res = res; + } + + /* Put EROM Bar aperture to 0 */ + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_EROM, 0x0); + + ret = cdns_pcie_hpa_host_link_setup(rc); + if (ret) + return ret; + + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) + rc->avail_ib_bar[bar] = true; + + ret = cdns_pcie_hpa_host_init(rc); + if (ret) + return ret; + + if (!bridge->ops) + bridge->ops = &cdns_pcie_hpa_host_ops; + + return pci_host_probe(bridge); +} +EXPORT_SYMBOL_GPL(cdns_pcie_hpa_host_setup); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence PCIe host controller driver"); diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index fffd63d6665e..db3154c1eccb 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -12,14 +12,7 @@ #include <linux/platform_device.h> #include "pcie-cadence.h" - -#define LINK_RETRAIN_TIMEOUT HZ - -static u64 bar_max_size[] = { - [RP_BAR0] = _ULL(128 * SZ_2G), - [RP_BAR1] = SZ_2G, - [RP_NO_BAR] = _BITULL(63), -}; +#include "pcie-cadence-host-common.h" static u8 bar_aperture_mask[] = { [RP_BAR0] = 0x1F, @@ 
-81,77 +74,6 @@ static struct pci_ops cdns_pcie_host_ops = { .write = pci_generic_config_write, }; -static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie) -{ - u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET; - unsigned long end_jiffies; - u16 lnk_stat; - - /* Wait for link training to complete. Exit after timeout. */ - end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; - do { - lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA); - if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) - break; - usleep_range(0, 1000); - } while (time_before(jiffies, end_jiffies)); - - if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) - return 0; - - return -ETIMEDOUT; -} - -static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie) -{ - struct device *dev = pcie->dev; - int retries; - - /* Check if the link is up or not */ - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { - if (cdns_pcie_link_up(pcie)) { - dev_info(dev, "Link up\n"); - return 0; - } - usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); - } - - return -ETIMEDOUT; -} - -static int cdns_pcie_retrain(struct cdns_pcie *pcie) -{ - u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET; - u16 lnk_stat, lnk_ctl; - int ret = 0; - - /* - * Set retrain bit if current speed is 2.5 GB/s, - * but the PCIe root port support is > 2.5 GB/s. - */ - - lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off + - PCI_EXP_LNKCAP)); - if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) - return ret; - - lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA); - if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { - lnk_ctl = cdns_pcie_rp_readw(pcie, - pcie_cap_off + PCI_EXP_LNKCTL); - lnk_ctl |= PCI_EXP_LNKCTL_RL; - cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL, - lnk_ctl); - - ret = cdns_pcie_host_training_complete(pcie); - if (ret) - return ret; - - ret = cdns_pcie_host_wait_for_link(pcie); - } - return ret; -} - static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie) { u32 val; @@ -168,23 +90,6 @@ static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie) cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN); } -static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc) -{ - struct cdns_pcie *pcie = &rc->pcie; - int ret; - - ret = cdns_pcie_host_wait_for_link(pcie); - - /* - * Retrain link for Gen2 training defect - * if quirk flag is set. 
- */ - if (!ret && rc->quirk_retrain_flag) - ret = cdns_pcie_retrain(pcie); - - return ret; -} - static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; @@ -245,10 +150,11 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) return 0; } -static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc, - enum cdns_pcie_rp_bar bar, - u64 cpu_addr, u64 size, - unsigned long flags) +int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc, + enum cdns_pcie_rp_bar bar, + u64 cpu_addr, + u64 size, + unsigned long flags) { struct cdns_pcie *pcie = &rc->pcie; u32 addr0, addr1, aperture, value; @@ -290,137 +196,6 @@ static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc, return 0; } -static enum cdns_pcie_rp_bar -cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size) -{ - enum cdns_pcie_rp_bar bar, sel_bar; - - sel_bar = RP_BAR_UNDEFINED; - for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { - if (!rc->avail_ib_bar[bar]) - continue; - - if (size <= bar_max_size[bar]) { - if (sel_bar == RP_BAR_UNDEFINED) { - sel_bar = bar; - continue; - } - - if (bar_max_size[bar] < bar_max_size[sel_bar]) - sel_bar = bar; - } - } - - return sel_bar; -} - -static enum cdns_pcie_rp_bar -cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size) -{ - enum cdns_pcie_rp_bar bar, sel_bar; - - sel_bar = RP_BAR_UNDEFINED; - for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { - if (!rc->avail_ib_bar[bar]) - continue; - - if (size >= bar_max_size[bar]) { - if (sel_bar == RP_BAR_UNDEFINED) { - sel_bar = bar; - continue; - } - - if (bar_max_size[bar] > bar_max_size[sel_bar]) - sel_bar = bar; - } - } - - return sel_bar; -} - -static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc, - struct resource_entry *entry) -{ - u64 cpu_addr, pci_addr, size, winsize; - struct cdns_pcie *pcie = &rc->pcie; - struct device *dev = pcie->dev; - enum cdns_pcie_rp_bar bar; - unsigned long flags; - int ret; - - cpu_addr = entry->res->start; - pci_addr = entry->res->start - entry->offset; - flags = entry->res->flags; - size = resource_size(entry->res); - - if (entry->offset) { - dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n", - pci_addr, cpu_addr); - return -EINVAL; - } - - while (size > 0) { - /* - * Try to find a minimum BAR whose size is greater than - * or equal to the remaining resource_entry size. This will - * fail if the size of each of the available BARs is less than - * the remaining resource_entry size. - * If a minimum BAR is found, IB ATU will be configured and - * exited. - */ - bar = cdns_pcie_host_find_min_bar(rc, size); - if (bar != RP_BAR_UNDEFINED) { - ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, - size, flags); - if (ret) - dev_err(dev, "IB BAR: %d config failed\n", bar); - return ret; - } - - /* - * If the control reaches here, it would mean the remaining - * resource_entry size cannot be fitted in a single BAR. So we - * find a maximum BAR whose size is less than or equal to the - * remaining resource_entry size and split the resource entry - * so that part of resource entry is fitted inside the maximum - * BAR. The remaining size would be fitted during the next - * iteration of the loop. - * If a maximum BAR is not found, there is no way we can fit - * this resource_entry, so we error out. 
- */ - bar = cdns_pcie_host_find_max_bar(rc, size); - if (bar == RP_BAR_UNDEFINED) { - dev_err(dev, "No free BAR to map cpu_addr %llx\n", - cpu_addr); - return -EINVAL; - } - - winsize = bar_max_size[bar]; - ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize, - flags); - if (ret) { - dev_err(dev, "IB BAR: %d config failed\n", bar); - return ret; - } - - size -= winsize; - cpu_addr += winsize; - } - - return 0; -} - -static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a, - const struct list_head *b) -{ - struct resource_entry *entry1, *entry2; - - entry1 = container_of(a, struct resource_entry, node); - entry2 = container_of(b, struct resource_entry, node); - - return resource_size(entry2->res) - resource_size(entry1->res); -} - static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; @@ -447,43 +222,6 @@ static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc) } } -static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc) -{ - struct cdns_pcie *pcie = &rc->pcie; - struct device *dev = pcie->dev; - struct device_node *np = dev->of_node; - struct pci_host_bridge *bridge; - struct resource_entry *entry; - u32 no_bar_nbits = 32; - int err; - - bridge = pci_host_bridge_from_priv(rc); - if (!bridge) - return -ENOMEM; - - if (list_empty(&bridge->dma_ranges)) { - of_property_read_u32(np, "cdns,no-bar-match-nbits", - &no_bar_nbits); - err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0, - (u64)1 << no_bar_nbits, 0); - if (err) - dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR); - return err; - } - - list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp); - - resource_list_for_each_entry(entry, &bridge->dma_ranges) { - err = cdns_pcie_host_bar_config(rc, entry); - if (err) { - dev_err(dev, "Fail to configure IB using dma-ranges\n"); - return err; - } - } - - return 0; -} - static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; @@ -561,7 +299,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) r++; } - return cdns_pcie_host_map_dma_ranges(rc); + return cdns_pcie_host_map_dma_ranges(rc, cdns_pcie_host_bar_ib_config); } static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc) @@ -607,7 +345,7 @@ int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc) return ret; } - ret = cdns_pcie_host_start_link(rc); + ret = cdns_pcie_host_start_link(rc, cdns_pcie_link_up); if (ret) dev_dbg(dev, "PCIe link never came up\n"); diff --git a/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h b/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h new file mode 100644 index 000000000000..026e131600de --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence PCIe controller driver. 
+ * + * Copyright (c) 2024, Cadence Design Systems + * Author: Manikandan K Pillai <mpillai@cadence.com> + */ +#ifndef _PCIE_CADENCE_HPA_REGS_H +#define _PCIE_CADENCE_HPA_REGS_H + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/pci-epf.h> +#include <linux/phy/phy.h> +#include <linux/bitfield.h> + +/* High Performance Architecture (HPA) PCIe controller registers */ +#define CDNS_PCIE_HPA_IP_REG_BANK 0x01000000 +#define CDNS_PCIE_HPA_IP_CFG_CTRL_REG_BANK 0x01003C00 +#define CDNS_PCIE_HPA_IP_AXI_MASTER_COMMON 0x02020000 + +/* Address Translation Registers */ +#define CDNS_PCIE_HPA_AXI_SLAVE 0x03000000 +#define CDNS_PCIE_HPA_AXI_MASTER 0x03002000 + +/* Root Port register base address */ +#define CDNS_PCIE_HPA_RP_BASE 0x0 + +#define CDNS_PCIE_HPA_LM_ID 0x1420 + +/* Endpoint Function BARs */ +#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG(bar, fn) \ + (((bar) < BAR_3) ? CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG0(fn) : \ + CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG1(fn)) +#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG0(pfn) (0x4000 * (pfn)) +#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG1(pfn) ((0x4000 * (pfn)) + 0x04) +#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG(bar, fn) \ + (((bar) < BAR_3) ? CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG0(fn) : \ + CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG1(fn)) +#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG0(vfn) ((0x4000 * (vfn)) + 0x08) +#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG1(vfn) ((0x4000 * (vfn)) + 0x0C) +#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(f) \ + (GENMASK(5, 0) << (0x4 + (f) * 10)) +#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ + (((a) << (4 + ((b) * 10))) & (CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))) +#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(f) \ + (GENMASK(3, 0) << ((f) * 10)) +#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ + (((c) << ((b) * 10)) & (CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))) + +/* Endpoint Function Configuration Register */ +#define CDNS_PCIE_HPA_LM_EP_FUNC_CFG 0x02C0 + +/* Root Complex BAR Configuration Register */ +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG 0x14 +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(9, 4) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE(a) \ + FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE_MASK, a) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(3, 0) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(c) \ + FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL_MASK, c) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(19, 14) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE(a) \ + FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE_MASK, a) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(13, 10) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(c) \ + FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL_MASK, c) + +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(20) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(21) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE BIT(22) +#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS BIT(23) + +/* BAR control values applicable to both Endpoint Function and Root Complex */ +#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED 0x0 +#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_IO_32BITS 0x3 +#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_32BITS 0x1 +#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x9 +#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_64BITS 0x5 +#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0xD + +#define HPA_LM_RC_BAR_CFG_CTRL_DISABLED(bar) \ + 
(CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED << ((bar) * 10)) +#define HPA_LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \ + (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_IO_32BITS << ((bar) * 10)) +#define HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \ + (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_32BITS << ((bar) * 10)) +#define HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \ + (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << ((bar) * 10)) +#define HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \ + (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_64BITS << ((bar) * 10)) +#define HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \ + (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << ((bar) * 10)) +#define HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture) \ + (((aperture) - 7) << (((bar) * 10) + 4)) + +#define CDNS_PCIE_HPA_LM_PTM_CTRL 0x0520 +#define CDNS_PCIE_HPA_LM_PTM_CTRL_PTMRSEN BIT(17) + +/* Root Port Registers PCI config space for root port function */ +#define CDNS_PCIE_HPA_RP_CAP_OFFSET 0xC0 + +/* Region r Outbound AXI to PCIe Address Translation Register 0 */ +#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r) (0x1010 + ((r) & 0x1F) * 0x0080) +#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0) +#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK) +#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(23, 16) +#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK, devfn) +#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(31, 24) +#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS_MASK, bus) + +/* Region r Outbound AXI to PCIe Address Translation Register 1 */ +#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r) (0x1014 + ((r) & 0x1F) * 0x0080) + +/* Region r Outbound PCIe Descriptor Register */ +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r) (0x1008 + ((r) & 0x1F) * 0x0080) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(28, 24) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MEM \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x0) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_IO \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x2) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x4) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x5) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x10) + +/* Region r Outbound PCIe Descriptor Register */ +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r) (0x100C + ((r) & 0x1F) * 0x0080) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS_MASK GENMASK(31, 24) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(bus) \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS_MASK, bus) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK GENMASK(23, 16) +#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(devfn) \ + FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK, devfn) + +#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r) (0x1018 + ((r) & 0x1F) * 0x0080) +#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS BIT(26) +#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN BIT(25) + +/* Region r AXI Region Base Address Register 0 */ +#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r) (0x1000 + ((r) & 0x1F) * 0x0080) +#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0) 
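+/* NBITS fields encode the window aperture as (number of address bits - 1) */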
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS_MASK) + +/* Region r AXI Region Base Address Register 1 */ +#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r) (0x1004 + ((r) & 0x1F) * 0x0080) + +/* Root Port BAR Inbound PCIe to AXI Address Translation Register */ +#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar) (((bar) * 0x0008)) +#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0) +#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS_MASK) +#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar) (0x04 + ((bar) * 0x0008)) + +/* AXI link down register */ +#define CDNS_PCIE_HPA_AT_LINKDOWN 0x04 + +/* + * Physical Layer Configuration Register 0 + * This register contains the parameters required for functional setup + * of Physical Layer. + */ +#define CDNS_PCIE_HPA_PHY_LAYER_CFG0 0x0400 +#define CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK GENMASK(26, 24) +#define CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY(delay) \ + FIELD_PREP(CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK, delay) +#define CDNS_PCIE_HPA_LINK_TRNG_EN_MASK GENMASK(27, 27) + +#define CDNS_PCIE_HPA_PHY_DBG_STS_REG0 0x0420 + +#define CDNS_PCIE_HPA_RP_MAX_IB 0x3 +#define CDNS_PCIE_HPA_MAX_OB 15 + +/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ +#define CDNS_PCIE_HPA_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) (((fn) * 0x0080) + ((bar) * 0x0008)) +#define CDNS_PCIE_HPA_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) (0x4 + ((fn) * 0x0080) + ((bar) * 0x0008)) + +/* Miscellaneous offsets definitions */ +#define CDNS_PCIE_HPA_TAG_MANAGEMENT 0x0 +#define CDNS_PCIE_HPA_SLAVE_RESP 0x100 + +#define I_ROOT_PORT_REQ_ID_REG 0x141c +#define LM_HAL_SBSA_CTRL 0x1170 + +#define I_PCIE_BUS_NUMBERS (CDNS_PCIE_HPA_RP_BASE + 0x18) +#define CDNS_PCIE_EROM 0x18 +#endif /* _PCIE_CADENCE_HPA_REGS_H */ diff --git a/drivers/pci/controller/cadence/pcie-cadence-hpa.c b/drivers/pci/controller/cadence/pcie-cadence-hpa.c new file mode 100644 index 000000000000..f60a16938265 --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-hpa.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence PCIe controller driver. + * + * Copyright (c) 2024, Cadence Design Systems + * Author: Manikandan K Pillai <mpillai@cadence.com> + */ +#include <linux/kernel.h> +#include <linux/of.h> + +#include "pcie-cadence.h" + +bool cdns_pcie_hpa_link_up(struct cdns_pcie *pcie) +{ + u32 pl_reg_val; + + pl_reg_val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_PHY_DBG_STS_REG0); + if (pl_reg_val & GENMASK(0, 0)) + return true; + return false; +} +EXPORT_SYMBOL_GPL(cdns_pcie_hpa_link_up); + +void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie) +{ + u32 delay = 0x3; + u32 ltssm_control_cap; + + /* Set the LTSSM Detect Quiet state min. 
delay to 2ms */ + ltssm_control_cap = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, + CDNS_PCIE_HPA_PHY_LAYER_CFG0); + ltssm_control_cap = ((ltssm_control_cap & + ~CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK) | + CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY(delay)); + + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, + CDNS_PCIE_HPA_PHY_LAYER_CFG0, ltssm_control_cap); +} +EXPORT_SYMBOL_GPL(cdns_pcie_hpa_detect_quiet_min_delay_set); + +void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, + u32 r, bool is_io, + u64 cpu_addr, u64 pci_addr, size_t size) +{ + /* + * roundup_pow_of_two() returns an unsigned long, which is not suited + * for 64bit values + */ + u64 sz = 1ULL << fls64(size - 1); + int nbits = ilog2(sz); + u32 addr0, addr1, desc0, desc1, ctrl0; + + if (nbits < 8) + nbits = 8; + + /* Set the PCI address */ + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) | + (lower_32_bits(pci_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(pci_addr); + + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), addr0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), addr1); + + /* Set the PCIe header descriptor */ + if (is_io) + desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_IO; + else + desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MEM; + desc1 = 0; + ctrl0 = 0; + + /* + * Whether Bit [26] is set or not inside DESC0 register of the outbound + * PCIe descriptor, the PCI function number must be set into + * Bits [31:24] of DESC1 anyway. + * + * In Root Complex mode, the function number is always 0 but in Endpoint + * mode, the PCIe controller may support more than one function. This + * function number needs to be set properly into the outbound PCIe + * descriptor. + * + * Besides, setting Bit [26] is mandatory when in Root Complex mode: + * then the driver must provide the bus, resp. device, number in + * Bits [31:24] of DESC1, resp. Bits[23:16] of DESC0. Like the function + * number, the device number is always 0 in Root Complex mode. + * + * However when in Endpoint mode, we can clear Bit [26] of DESC0, hence + * the PCIe controller will use the captured values for the bus and + * device numbers. 
+ */ + if (pcie->is_rc) { + /* The device and function numbers are always 0 */ + desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) | + CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0); + ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS | + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN; + } else { + /* + * Use captured values for bus and device numbers but still + * need to set the function number + */ + desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn); + } + + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1); + + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0); +} +EXPORT_SYMBOL_GPL(cdns_pcie_hpa_set_outbound_region); + +void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, + u8 busnr, u8 fn, + u32 r, u64 cpu_addr) +{ + u32 addr0, addr1, desc0, desc1, ctrl0; + + desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG; + desc1 = 0; + ctrl0 = 0; + + /* See cdns_pcie_set_outbound_region() comments above */ + if (pcie->is_rc) { + desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) | + CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0); + ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS | + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN; + } else { + desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn); + } + + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(17) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), 0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), 0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1); + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0); +} +EXPORT_SYMBOL_GPL(cdns_pcie_hpa_set_outbound_region_for_normal_msg); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence PCIe controller driver"); diff --git a/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h b/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h new file mode 100644 index 000000000000..857b2140c5d2 --- /dev/null +++ b/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence PCIe controller driver. 
+ * + * Copyright (c) 2017 Cadence + * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> + */ +#ifndef _PCIE_CADENCE_LGA_REGS_H +#define _PCIE_CADENCE_LGA_REGS_H + +#include <linux/bitfield.h> + +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +/* Local Management Registers */ +#define CDNS_PCIE_LM_BASE 0x00100000 + +/* Vendor ID Register */ +#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044) +#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0) +#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0 +#define CDNS_PCIE_LM_ID_VENDOR(vid) \ + (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK) +#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16) +#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16 +#define CDNS_PCIE_LM_ID_SUBSYS(sub) \ + (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK) + +/* Root Port Requester ID Register */ +#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228) +#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0) +#define CDNS_PCIE_LM_RP_RID_SHIFT 0 +#define CDNS_PCIE_LM_RP_RID_(rid) \ + (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK) + +/* Endpoint Bus and Device Number Register */ +#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022C) +#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0) +#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0 +#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8) +#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8 + +/* Endpoint Function f BAR b Configuration Registers */ +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \ + (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn)) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \ + (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \ + (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008) +#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \ + (((bar) < BAR_4) ? 
CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn)) +#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \ + (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008) +#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \ + (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ + (GENMASK(4, 0) << ((b) * 8)) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ + (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ + (GENMASK(7, 5) << ((b) * 8)) +#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ + (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) + +/* Endpoint Function Configuration Register */ +#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02C0) + +/* Root Complex BAR Configuration Register */ +#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \ + (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \ + (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \ + (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14) +#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \ + (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK) +#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17) +#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0 +#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18) +#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19) +#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0 +#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20) +#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31) + +/* BAR control values applicable to both Endpoint Function and Root Complex */ +#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6 +#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 + +#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \ + (((aperture) - 2) << ((bar) * 8)) + +/* PTM Control Register */ +#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0DA8) +#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17) + +/* + * Endpoint Function Registers (PCI configuration space for endpoint functions) + */ +#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) + +#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90 +#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xB0 +#define 
CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xC0 +#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200 + +/* Endpoint PF Registers */ +#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 0x1000) +#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8) + +/* Root Port Registers (PCI configuration space for the root port function) */ +#define CDNS_PCIE_RP_BASE 0x00200000 +#define CDNS_PCIE_RP_CAP_OFFSET 0xC0 + +/* Address Translation Registers */ +#define CDNS_PCIE_AT_BASE 0x00400000 + +/* Region r Outbound AXI to PCIe Address Translation Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ + (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1F) * 0x0020) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ + (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ + (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) + +/* Region r Outbound AXI to PCIe Address Translation Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ + (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1F) * 0x0020) + +/* Region r Outbound PCIe Descriptor Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \ + (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1F) * 0x0020) +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0) +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2 +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6 +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xA +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xB +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xC +#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xD +/* Bit 23 MUST be set in RC mode. 
*/ +#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) +#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) +#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ + (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) + +/* Region r Outbound PCIe Descriptor Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \ + (CDNS_PCIE_AT_BASE + 0x000C + ((r) & 0x1F) * 0x0020) +#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0) +#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \ + ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK) + +/* Region r AXI Region Base Address Register 0 */ +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ + (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1F) * 0x0020) +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0) +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK) + +/* Region r AXI Region Base Address Register 1 */ +#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ + (CDNS_PCIE_AT_BASE + 0x001C + ((r) & 0x1F) * 0x0020) + +/* Root Port BAR Inbound PCIe to AXI Address Translation Register */ +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \ + (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008) +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0) +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \ + (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK) +#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \ + (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008) + +/* AXI link down register */ +#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824) + +/* LTSSM Capabilities register */ +#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054) +#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1) +#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1 +#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \ + (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \ + CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) + +#define CDNS_PCIE_RP_MAX_IB 0x3 +#define CDNS_PCIE_MAX_OB 32 + +/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ +#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ + (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) +#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ + (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) + +/* Normal/Vendor specific message access: offset inside some outbound region */ +#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5) +#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \ + (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK) +#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) +#define CDNS_PCIE_NORMAL_MSG_CODE(code) \ + (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) +#define CDNS_PCIE_MSG_NO_DATA BIT(16) + +#endif /* _PCIE_CADENCE_LGA_REGS_H */ diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c index 0456845dabb9..b067a3296dd3 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-plat.c +++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c @@ -22,10 +22,6 @@ struct cdns_plat_pcie { struct cdns_pcie *pcie; }; -struct cdns_plat_pcie_of_data { - bool is_rc; -}; - static const struct of_device_id cdns_plat_pcie_of_match[]; static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr) @@ -177,4 +173,7 @@ static struct platform_driver cdns_plat_pcie_driver = { .probe = cdns_plat_pcie_probe, .shutdown = cdns_plat_pcie_shutdown, }; -builtin_platform_driver(cdns_plat_pcie_driver); 
+module_platform_driver(cdns_plat_pcie_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence PCIe controller platform driver"); diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c index bd683d0fecb2..e6f1a4ac0fb7 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.c +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -23,6 +23,17 @@ u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap) } EXPORT_SYMBOL_GPL(cdns_pcie_find_ext_capability); +bool cdns_pcie_linkup(struct cdns_pcie *pcie) +{ + u32 pl_reg_val; + + pl_reg_val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE); + if (pl_reg_val & GENMASK(0, 0)) + return true; + return false; +} +EXPORT_SYMBOL_GPL(cdns_pcie_linkup); + void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie) { u32 delay = 0x3; @@ -293,6 +304,7 @@ const struct dev_pm_ops cdns_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq, cdns_pcie_resume_noirq) }; +EXPORT_SYMBOL_GPL(cdns_pcie_pm_ops); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cadence PCIe controller driver"); diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index e2a853d2c0ab..443033c607d7 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -7,211 +7,12 @@ #define _PCIE_CADENCE_H #include <linux/kernel.h> +#include <linux/module.h> #include <linux/pci.h> #include <linux/pci-epf.h> #include <linux/phy/phy.h> - -/* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_USLEEP_MIN 90000 -#define LINK_WAIT_USLEEP_MAX 100000 - -/* - * Local Management Registers - */ -#define CDNS_PCIE_LM_BASE 0x00100000 - -/* Vendor ID Register */ -#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044) -#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0) -#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0 -#define CDNS_PCIE_LM_ID_VENDOR(vid) \ - (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK) -#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16) -#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16 -#define CDNS_PCIE_LM_ID_SUBSYS(sub) \ - (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK) - -/* Root Port Requester ID Register */ -#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228) -#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0) -#define CDNS_PCIE_LM_RP_RID_SHIFT 0 -#define CDNS_PCIE_LM_RP_RID_(rid) \ - (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK) - -/* Endpoint Bus and Device Number Register */ -#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c) -#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0) -#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0 -#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8) -#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8 - -/* Endpoint Function f BAR b Configuration Registers */ -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \ - (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn)) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \ - (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \ - (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008) -#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \ - (((bar) < BAR_4) ? 
CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn)) -#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \ - (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008) -#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \ - (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ - (GENMASK(4, 0) << ((b) * 8)) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ - (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ - (GENMASK(7, 5) << ((b) * 8)) -#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ - (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) - -/* Endpoint Function Configuration Register */ -#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0) - -/* Root Complex BAR Configuration Register */ -#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \ - (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \ - (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \ - (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14) -#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \ - (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK) -#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17) -#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0 -#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18) -#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19) -#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0 -#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20) -#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31) - -/* BAR control values applicable to both Endpoint Function and Root Complex */ -#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6 -#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 - -#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \ - (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6)) -#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \ - (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6)) -#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \ - (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6)) -#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \ - (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6)) -#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \ - (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6)) -#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \ - (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6)) -#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \ - (((aperture) - 2) << ((bar) * 8)) - -/* PTM Control Register */ -#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0da8) -#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17) - -/* - * Endpoint Function Registers (PCI configuration space for endpoint functions) - */ -#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) - -/* - * Endpoint PF Registers - */ -#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 
0x1000) -#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8) - -/* - * Root Port Registers (PCI configuration space for the root port function) - */ -#define CDNS_PCIE_RP_BASE 0x00200000 -#define CDNS_PCIE_RP_CAP_OFFSET 0xc0 - -/* - * Address Translation Registers - */ -#define CDNS_PCIE_AT_BASE 0x00400000 - -/* Region r Outbound AXI to PCIe Address Translation Register 0 */ -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ - (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \ - (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ - (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ - (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) - -/* Region r Outbound AXI to PCIe Address Translation Register 1 */ -#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ - (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) - -/* Region r Outbound PCIe Descriptor Register 0 */ -#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \ - (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0) -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2 -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6 -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc -#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd -/* Bit 23 MUST be set in RC mode. 
*/ -#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) -#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) -#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ - (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) - -/* Region r Outbound PCIe Descriptor Register 1 */ -#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \ - (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020) -#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0) -#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \ - ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK) - -/* Region r AXI Region Base Address Register 0 */ -#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ - (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) -#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0) -#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \ - (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK) - -/* Region r AXI Region Base Address Register 1 */ -#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ - (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020) - -/* Root Port BAR Inbound PCIe to AXI Address Translation Register */ -#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \ - (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008) -#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0) -#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \ - (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK) -#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \ - (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008) - -/* AXI link down register */ -#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824) - -/* LTSSM Capabilities register */ -#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054) -#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1) -#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1 -#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \ - (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \ - CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) +#include "pcie-cadence-lga-regs.h" +#include "pcie-cadence-hpa-regs.h" enum cdns_pcie_rp_bar { RP_BAR_UNDEFINED = -1, @@ -220,42 +21,63 @@ enum cdns_pcie_rp_bar { RP_NO_BAR }; -#define CDNS_PCIE_RP_MAX_IB 0x3 -#define CDNS_PCIE_MAX_OB 32 - struct cdns_pcie_rp_ib_bar { u64 size; bool free; }; -/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ -#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ - (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) -#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ - (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) - -/* Normal/Vendor specific message access: offset inside some outbound region */ -#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5) -#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \ - (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK) -#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) -#define CDNS_PCIE_NORMAL_MSG_CODE(code) \ - (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) -#define CDNS_PCIE_MSG_DATA BIT(16) - struct cdns_pcie; +struct cdns_pcie_rc; + +enum cdns_pcie_reg_bank { + REG_BANK_RP, + REG_BANK_IP_REG, + REG_BANK_IP_CFG_CTRL_REG, + REG_BANK_AXI_MASTER_COMMON, + REG_BANK_AXI_MASTER, + REG_BANK_AXI_SLAVE, + REG_BANK_AXI_HLS, + REG_BANK_AXI_RAS, + REG_BANK_AXI_DTI, + REG_BANKS_MAX, +}; struct cdns_pcie_ops { - int (*start_link)(struct cdns_pcie *pcie); - void (*stop_link)(struct cdns_pcie *pcie); - bool (*link_up)(struct cdns_pcie *pcie); + int (*start_link)(struct cdns_pcie *pcie); + void (*stop_link)(struct cdns_pcie *pcie); + bool (*link_up)(struct cdns_pcie 
*pcie); u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr); }; /** + * struct cdns_plat_pcie_of_data - Register bank offset for a platform + * @is_rc: controller is a RC + * @ip_reg_bank_offset: ip register bank start offset + * @ip_cfg_ctrl_reg_offset: ip config control register start offset + * @axi_mstr_common_offset: AXI master common register start offset + * @axi_slave_offset: AXI slave start offset + * @axi_master_offset: AXI master start offset + * @axi_hls_offset: AXI HLS offset start + * @axi_ras_offset: AXI RAS offset + * @axi_dti_offset: AXI DTI offset + */ +struct cdns_plat_pcie_of_data { + u32 is_rc:1; + u32 ip_reg_bank_offset; + u32 ip_cfg_ctrl_reg_offset; + u32 axi_mstr_common_offset; + u32 axi_slave_offset; + u32 axi_master_offset; + u32 axi_hls_offset; + u32 axi_ras_offset; + u32 axi_dti_offset; +}; + +/** * struct cdns_pcie - private data for Cadence PCIe controller drivers * @reg_base: IO mapped register base * @mem_res: start/end offsets in the physical system memory to map PCI accesses + * @msg_res: Region for send message to map PCI accesses * @dev: PCIe controller * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint. * @phy_count: number of supported PHY devices @@ -263,16 +85,19 @@ struct cdns_pcie_ops { * @link: list of pointers to corresponding device link representations * @ops: Platform-specific ops to control various inputs from Cadence PCIe * wrapper + * @cdns_pcie_reg_offsets: Register bank offsets for different SoC */ struct cdns_pcie { - void __iomem *reg_base; - struct resource *mem_res; - struct device *dev; - bool is_rc; - int phy_count; - struct phy **phy; - struct device_link **link; - const struct cdns_pcie_ops *ops; + void __iomem *reg_base; + struct resource *mem_res; + struct resource *msg_res; + struct device *dev; + bool is_rc; + int phy_count; + struct phy **phy; + struct device_link **link; + const struct cdns_pcie_ops *ops; + const struct cdns_plat_pcie_of_data *cdns_pcie_reg_offsets; }; /** @@ -288,6 +113,8 @@ struct cdns_pcie { * available * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk + * @ecam_supported: Whether the ECAM is supported + * @no_inbound_map: Whether inbound mapping is supported */ struct cdns_pcie_rc { struct cdns_pcie pcie; @@ -298,6 +125,8 @@ struct cdns_pcie_rc { bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB]; unsigned int quirk_retrain_flag:1; unsigned int quirk_detect_quiet_flag:1; + unsigned int ecam_supported:1; + unsigned int no_inbound_map:1; }; /** @@ -350,6 +179,43 @@ struct cdns_pcie_ep { unsigned int quirk_disable_flr:1; }; +static inline u32 cdns_reg_bank_to_off(struct cdns_pcie *pcie, enum cdns_pcie_reg_bank bank) +{ + u32 offset = 0x0; + + switch (bank) { + case REG_BANK_RP: + offset = 0; + break; + case REG_BANK_IP_REG: + offset = pcie->cdns_pcie_reg_offsets->ip_reg_bank_offset; + break; + case REG_BANK_IP_CFG_CTRL_REG: + offset = pcie->cdns_pcie_reg_offsets->ip_cfg_ctrl_reg_offset; + break; + case REG_BANK_AXI_MASTER_COMMON: + offset = pcie->cdns_pcie_reg_offsets->axi_mstr_common_offset; + break; + case REG_BANK_AXI_MASTER: + offset = pcie->cdns_pcie_reg_offsets->axi_master_offset; + break; + case REG_BANK_AXI_SLAVE: + offset = pcie->cdns_pcie_reg_offsets->axi_slave_offset; + break; + case REG_BANK_AXI_HLS: + offset = pcie->cdns_pcie_reg_offsets->axi_hls_offset; + break; + case REG_BANK_AXI_RAS: + offset = pcie->cdns_pcie_reg_offsets->axi_ras_offset; + break; + case REG_BANK_AXI_DTI: + offset = 
pcie->cdns_pcie_reg_offsets->axi_dti_offset; + break; + default: + break; + } + return offset; +} /* Register access */ static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) @@ -362,6 +228,27 @@ static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) return readl(pcie->reg_base + reg); } +static inline void cdns_pcie_hpa_writel(struct cdns_pcie *pcie, + enum cdns_pcie_reg_bank bank, + u32 reg, + u32 value) +{ + u32 offset = cdns_reg_bank_to_off(pcie, bank); + + reg += offset; + writel(value, pcie->reg_base + reg); +} + +static inline u32 cdns_pcie_hpa_readl(struct cdns_pcie *pcie, + enum cdns_pcie_reg_bank bank, + u32 reg) +{ + u32 offset = cdns_reg_bank_to_off(pcie, bank); + + reg += offset; + return readl(pcie->reg_base + reg); +} + static inline u16 cdns_pcie_readw(struct cdns_pcie *pcie, u32 reg) { return readw(pcie->reg_base + reg); @@ -457,6 +344,29 @@ static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg) return cdns_pcie_read_sz(addr, 0x2); } +static inline void cdns_pcie_hpa_rp_writeb(struct cdns_pcie *pcie, + u32 reg, u8 value) +{ + void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg; + + cdns_pcie_write_sz(addr, 0x1, value); +} + +static inline void cdns_pcie_hpa_rp_writew(struct cdns_pcie *pcie, + u32 reg, u16 value) +{ + void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg; + + cdns_pcie_write_sz(addr, 0x2, value); +} + +static inline u16 cdns_pcie_hpa_rp_readw(struct cdns_pcie *pcie, u32 reg) +{ + void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg; + + return cdns_pcie_read_sz(addr, 0x2); +} + /* Endpoint Function register access */ static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn, u32 reg, u8 value) @@ -521,6 +431,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc); void cdns_pcie_host_disable(struct cdns_pcie_rc *rc); void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, int where); +int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc); #else static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc) { @@ -537,6 +448,11 @@ static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) return 0; } +static inline int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc) +{ + return 0; +} + static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc) { } @@ -551,6 +467,7 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d #if IS_ENABLED(CONFIG_PCIE_CADENCE_EP) int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep); void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep); +int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep); #else static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) { @@ -560,10 +477,17 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep) { } + +static inline int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep) +{ + return 0; +} + #endif -u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap); -u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap); +u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap); +u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap); +bool cdns_pcie_linkup(struct cdns_pcie *pcie); void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie); @@ -577,8 +501,23 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r); void cdns_pcie_disable_phy(struct 
cdns_pcie *pcie); -int cdns_pcie_enable_phy(struct cdns_pcie *pcie); -int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie); +int cdns_pcie_enable_phy(struct cdns_pcie *pcie); +int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie); +void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie); +void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, + u32 r, bool is_io, + u64 cpu_addr, u64 pci_addr, size_t size); +void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, + u8 busnr, u8 fn, + u32 r, u64 cpu_addr); +int cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc); +void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn, + int where); +int cdns_pcie_hpa_host_start_link(struct cdns_pcie_rc *rc); +int cdns_pcie_hpa_start_link(struct cdns_pcie *pcie); +void cdns_pcie_hpa_stop_link(struct cdns_pcie *pcie); +bool cdns_pcie_hpa_link_up(struct cdns_pcie *pcie); + extern const struct dev_pm_ops cdns_pcie_pm_ops; #endif /* _PCIE_CADENCE_H */ diff --git a/drivers/pci/controller/cadence/pcie-sg2042.c b/drivers/pci/controller/cadence/pcie-sg2042.c index a077b28d4894..0c50c74d03ee 100644 --- a/drivers/pci/controller/cadence/pcie-sg2042.c +++ b/drivers/pci/controller/cadence/pcie-sg2042.c @@ -74,15 +74,12 @@ static int sg2042_pcie_probe(struct platform_device *pdev) static void sg2042_pcie_remove(struct platform_device *pdev) { struct cdns_pcie *pcie = platform_get_drvdata(pdev); - struct device *dev = &pdev->dev; struct cdns_pcie_rc *rc; rc = container_of(pcie, struct cdns_pcie_rc, pcie); cdns_pcie_host_disable(rc); cdns_pcie_disable_phy(pcie); - - pm_runtime_disable(dev); } static int sg2042_pcie_suspend_noirq(struct device *dev) diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 349d4657393c..519b59422b47 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -256,6 +256,16 @@ config PCIE_TEGRA194_EP in order to enable device-specific features PCIE_TEGRA194_EP must be selected. This uses the DesignWare core. +config PCIE_NXP_S32G + bool "NXP S32G PCIe controller (host mode)" + depends on ARCH_S32 || COMPILE_TEST + select PCIE_DW_HOST + help + Enable support for the PCIe controller in NXP S32G based boards to + work in Host mode. The controller is based on DesignWare IP and + can work either as RC or EP. In order to enable host-specific + features PCIE_NXP_S32G must be selected. + config PCIE_DW_PLAT bool @@ -416,6 +426,19 @@ config PCIE_SOPHGO_DW Say Y here if you want PCIe host controller support on Sophgo SoCs. +config PCIE_SPACEMIT_K1 + tristate "SpacemiT K1 PCIe controller (host mode)" + depends on ARCH_SPACEMIT || COMPILE_TEST + depends on HAS_IOMEM + select PCIE_DW_HOST + select PCI_PWRCTRL_SLOT + default ARCH_SPACEMIT + help + Enables support for the DesignWare based PCIe controller in + the SpacemiT K1 SoC operating in host mode. Three controllers + are available on the K1 SoC; the first of these shares a PHY + with a USB 3.0 host controller (one or the other can be used). + config PCIE_SPEAR13XX bool "STMicroelectronics SPEAr PCIe controller" depends on ARCH_SPEAR13XX || COMPILE_TEST @@ -482,15 +505,21 @@ config PCI_DRA7XX_EP to enable device-specific features PCI_DRA7XX_EP must be selected. This uses the DesignWare core. +# ARM32 platforms use hook_fault_code() and cannot support loadable module. config PCI_KEYSTONE bool +# On non-ARM32 platforms, loadable module can be supported. 
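# Editorial note (not part of the patch): PCI_KEYSTONE and
# PCI_KEYSTONE_TRISTATE both build the same pci-keystone.o object (see
# the Makefile hunk below); the split exists only because the ARM32
# build installs a fault hook via hook_fault_code() and therefore must
# stay built-in.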
+config PCI_KEYSTONE_TRISTATE + tristate + config PCI_KEYSTONE_HOST - bool "TI Keystone PCIe controller (host mode)" + tristate "TI Keystone PCIe controller (host mode)" depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST depends on PCI_MSI select PCIE_DW_HOST - select PCI_KEYSTONE + select PCI_KEYSTONE if ARM + select PCI_KEYSTONE_TRISTATE if !ARM help Enables support for the PCIe controller in the Keystone SoC to work in host mode. The PCI controller on Keystone is based on @@ -498,11 +527,12 @@ config PCI_KEYSTONE_HOST DesignWare core functions to implement the driver. config PCI_KEYSTONE_EP - bool "TI Keystone PCIe controller (endpoint mode)" + tristate "TI Keystone PCIe controller (endpoint mode)" depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST depends on PCI_ENDPOINT select PCIE_DW_EP - select PCI_KEYSTONE + select PCI_KEYSTONE if ARM + select PCI_KEYSTONE_TRISTATE if !ARM help Enables support for the PCIe controller in the Keystone SoC to work in endpoint mode. The PCI controller on Keystone is based diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index 7ae28f3b0fb3..67ba59c02038 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -10,8 +10,12 @@ obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o obj-$(CONFIG_PCI_IMX6) += pci-imx6.o +obj-$(CONFIG_PCIE_NXP_S32G) += pcie-nxp-s32g.o obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o +# ARM32 platforms use hook_fault_code() and cannot support loadable module. obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o +# On non-ARM32 platforms, loadable module can be supported. +obj-$(CONFIG_PCI_KEYSTONE_TRISTATE) += pci-keystone.o obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o @@ -31,6 +35,7 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o obj-$(CONFIG_PCIE_RCAR_GEN4) += pcie-rcar-gen4.o +obj-$(CONFIG_PCIE_SPACEMIT_K1) += pcie-spacemit-k1.o obj-$(CONFIG_PCIE_STM32_HOST) += pcie-stm32.o obj-$(CONFIG_PCIE_STM32_EP) += pcie-stm32-ep.o diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index eb00aa380722..f86d9111f863 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -17,6 +17,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/mfd/syscon.h> +#include <linux/module.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/of_irq.h> @@ -777,29 +778,7 @@ err: return ret; } -#ifdef CONFIG_ARM -/* - * When a PCI device does not exist during config cycles, keystone host - * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE). - * This handler always returns 0 for this kind of fault. 
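/*
 * Editorial note (not part of the patch): the mask/value pair
 * 0x0e100090/0x00100090 in ks_pcie_fault() appears to match the ARM
 * "extra load/store" encodings with the L bit set (e.g. LDRH); bits
 * [15:12] of the faulting instruction encode the destination register,
 * which the handler forces to all-ones before stepping the PC past the
 * instruction.
 */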
- */ -static int ks_pcie_fault(unsigned long addr, unsigned int fsr, - struct pt_regs *regs) -{ - unsigned long instr = *(unsigned long *) instruction_pointer(regs); - - if ((instr & 0x0e100090) == 0x00100090) { - int reg = (instr >> 12) & 15; - - regs->uregs[reg] = -1; - regs->ARM_pc += 4; - } - - return 0; -} -#endif - -static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) +static int ks_pcie_init_id(struct keystone_pcie *ks_pcie) { int ret; unsigned int id; @@ -831,7 +810,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) return 0; } -static int __init ks_pcie_host_init(struct dw_pcie_rp *pp) +static int ks_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); @@ -861,15 +840,6 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp) if (ret < 0) return ret; -#ifdef CONFIG_ARM - /* - * PCIe access errors that result into OCP errors are caught by ARM as - * "External aborts" - */ - hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, - "Asynchronous external abort"); -#endif - return 0; } @@ -1134,6 +1104,7 @@ static const struct of_device_id ks_pcie_of_match[] = { }, { }, }; +MODULE_DEVICE_TABLE(of, ks_pcie_of_match); static int ks_pcie_probe(struct platform_device *pdev) { @@ -1337,6 +1308,8 @@ static int ks_pcie_probe(struct platform_device *pdev) break; default: dev_err(dev, "INVALID device type %d\n", mode); + ret = -EINVAL; + goto err_get_sync; } ks_pcie_enable_error_irq(ks_pcie); @@ -1379,4 +1352,45 @@ static struct platform_driver ks_pcie_driver = { .of_match_table = ks_pcie_of_match, }, }; + +#ifdef CONFIG_ARM +/* + * When a PCI device does not exist during config cycles, keystone host + * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE). + * This handler always returns 0 for this kind of fault. + */ +static int ks_pcie_fault(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + unsigned long instr = *(unsigned long *)instruction_pointer(regs); + + if ((instr & 0x0e100090) == 0x00100090) { + int reg = (instr >> 12) & 15; + + regs->uregs[reg] = -1; + regs->ARM_pc += 4; + } + + return 0; +} + +static int __init ks_pcie_init(void) +{ + /* + * PCIe access errors that result into OCP errors are caught by ARM as + * "External aborts" + */ + if (of_find_matching_node(NULL, ks_pcie_of_match)) + hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, + "Asynchronous external abort"); + + return platform_driver_register(&ks_pcie_driver); +} +device_initcall(ks_pcie_init); +#else builtin_platform_driver(ks_pcie_driver); +#endif + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("PCIe controller driver for Texas Instruments Keystone SoCs"); +MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>"); diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index 787469d1b396..54b6a4196f17 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -108,10 +108,22 @@ static int meson_pcie_get_mems(struct platform_device *pdev, struct meson_pcie *mp) { struct dw_pcie *pci = &mp->pci; + struct resource *res; - pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi"); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); + /* + * For the broken DTs that supply 'dbi' as 'elbi', parse the 'elbi' + * region and assign it to both 'pci->elbi_base' and 'pci->dbi_space' so + * that the DWC core can skip parsing both regions. 
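/*
 * Editorial note (not part of the patch): this workaround relies on the
 * dw_pcie_get_resources() change later in this series (see the
 * pcie-designware.c hunk below), which now skips mapping 'elbi' when the
 * platform driver has already populated pci->elbi_base; the assignments
 * below fill pci->dbi_base and pci->dbi_phys_addr so the core skips the
 * 'dbi' region as well.
 */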
+ */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); + if (res) { + pci->elbi_base = devm_pci_remap_cfg_resource(pci->dev, res); + if (IS_ERR(pci->elbi_base)) + return PTR_ERR(pci->elbi_base); + + pci->dbi_base = pci->elbi_base; + pci->dbi_phys_addr = res->start; + } mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg"); if (IS_ERR(mp->cfg_base)) diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 7f2112c2fb21..19571ac2b961 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -797,6 +797,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, return 0; } +EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq); /** * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 20c9333bcb1c..372207c33a85 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -23,6 +23,7 @@ #include "pcie-designware.h" static struct pci_ops dw_pcie_ops; +static struct pci_ops dw_pcie_ecam_ops; static struct pci_ops dw_child_pcie_ops; #define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ @@ -232,6 +233,7 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp) return 0; } +EXPORT_SYMBOL_GPL(dw_pcie_allocate_domains); void dw_pcie_free_msi(struct dw_pcie_rp *pp) { @@ -471,9 +473,6 @@ static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *re if (IS_ERR(pp->cfg)) return PTR_ERR(pp->cfg); - pci->dbi_base = pp->cfg->win; - pci->dbi_phys_addr = res->start; - return 0; } @@ -529,7 +528,7 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp) if (ret) return ret; - pp->bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops; + pp->bridge->ops = &dw_pcie_ecam_ops; pp->bridge->sysdata = pp->cfg; pp->cfg->priv = pp; } else { @@ -842,10 +841,41 @@ void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, } EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus); +static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) +{ + struct pci_config_window *cfg = bus->sysdata; + struct dw_pcie_rp *pp = cfg->priv; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + unsigned int busn = bus->number; + + if (busn > 0) + return pci_ecam_map_bus(bus, devfn, where); + + if (PCI_SLOT(devfn) > 0) + return NULL; + + return pci->dbi_base + where; +} + +static int dw_pcie_op_assert_perst(struct pci_bus *bus, bool assert) +{ + struct dw_pcie_rp *pp = bus->sysdata; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + + return dw_pcie_assert_perst(pci, assert); +} + static struct pci_ops dw_pcie_ops = { .map_bus = dw_pcie_own_conf_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, + .assert_perst = dw_pcie_op_assert_perst, +}; + +static struct pci_ops dw_pcie_ecam_ops = { + .map_bus = dw_pcie_ecam_conf_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, }; static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) @@ -1060,6 +1090,8 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp) PCI_COMMAND_MASTER | PCI_COMMAND_SERR; dw_pcie_writel_dbi(pci, PCI_COMMAND, val); + dw_pcie_hide_unsupported_l1ss(pci); + dw_pcie_config_presets(pp); /* * If the platform provides its own child bus config accesses, it means diff --git 
a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index c644216995f6..75fc8b767fcc 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -168,11 +168,13 @@ int dw_pcie_get_resources(struct dw_pcie *pci) } /* ELBI is an optional resource */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); - if (res) { - pci->elbi_base = devm_ioremap_resource(pci->dev, res); - if (IS_ERR(pci->elbi_base)) - return PTR_ERR(pci->elbi_base); + if (!pci->elbi_base) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); + if (res) { + pci->elbi_base = devm_ioremap_resource(pci->dev, res); + if (IS_ERR(pci->elbi_base)) + return PTR_ERR(pci->elbi_base); + } } /* LLDD is supposed to manually switch the clocks and resets state */ @@ -1081,6 +1083,30 @@ void dw_pcie_edma_remove(struct dw_pcie *pci) dw_edma_remove(&pci->edma); } +void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci) +{ + u16 l1ss; + u32 l1ss_cap; + + if (pci->l1ss_support) + return; + + l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS); + if (!l1ss) + return; + + /* + * Unless the driver claims "l1ss_support", don't advertise L1 PM + * Substates because they require CLKREQ# and possibly other + * device-specific configuration. + */ + l1ss_cap = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP); + l1ss_cap &= ~(PCI_L1SS_CAP_PCIPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_1 | + PCI_L1SS_CAP_PCIPM_L1_2 | PCI_L1SS_CAP_ASPM_L1_2 | + PCI_L1SS_CAP_L1_PM_SS); + dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, l1ss_cap); +} + void dw_pcie_setup(struct dw_pcie *pci) { u32 val; diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index e995f692a1ec..31685951a080 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -97,7 +97,7 @@ #define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0) #define PCIE_PORT_DEBUG0 0x728 -#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f +#define PORT_LOGIC_LTSSM_STATE_MASK 0x3f #define PORT_LOGIC_LTSSM_STATE_L0 0x11 #define PCIE_PORT_DEBUG1 0x72C #define PCIE_PORT_DEBUG1_LINK_UP BIT(4) @@ -121,6 +121,7 @@ #define GEN3_RELATED_OFF 0x890 #define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0) +#define GEN3_RELATED_OFF_EQ_PHASE_2_3 BIT(9) #define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13) #define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16) #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24 @@ -138,6 +139,13 @@ #define GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA GENMASK(13, 10) #define GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA GENMASK(17, 14) +#define COHERENCY_CONTROL_1_OFF 0x8E0 +#define CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK GENMASK(31, 2) +#define CFG_MEMTYPE_VALUE BIT(0) + +#define COHERENCY_CONTROL_2_OFF 0x8E4 +#define COHERENCY_CONTROL_3_OFF 0x8E8 + #define PCIE_PORT_MULTI_LANE_CTRL 0x8C0 #define PORT_MLTI_UPCFG_SUPPORT BIT(7) @@ -485,6 +493,7 @@ struct dw_pcie_ops { enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie); int (*start_link)(struct dw_pcie *pcie); void (*stop_link)(struct dw_pcie *pcie); + int (*assert_perst)(struct dw_pcie *pcie, bool assert); }; struct debugfs_info { @@ -516,6 +525,7 @@ struct dw_pcie { int max_link_speed; u8 n_fts[2]; struct dw_edma_chip edma; + bool l1ss_support; /* L1 PM Substates support */ struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS]; struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS]; struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS]; @@ -573,6 +583,7 @@ int 
dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, int type, u64 parent_bus_addr, u8 bar, size_t size); void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index); +void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci); void dw_pcie_setup(struct dw_pcie *pci); void dw_pcie_iatu_detect(struct dw_pcie *pci); int dw_pcie_edma_detect(struct dw_pcie *pci); @@ -787,6 +798,14 @@ static inline void dw_pcie_stop_link(struct dw_pcie *pci) pci->ops->stop_link(pci); } +static inline int dw_pcie_assert_perst(struct dw_pcie *pci, bool assert) +{ + if (pci->ops && pci->ops->assert_perst) + return pci->ops->assert_perst(pci, assert); + + return 0; +} + static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci) { u32 val; diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 3e2752c7dd09..f8605fe61a41 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -62,6 +62,12 @@ /* Interrupt Mask Register Related to Miscellaneous Operation */ #define PCIE_CLIENT_INTR_MASK_MISC 0x24 +/* Power Management Control Register */ +#define PCIE_CLIENT_POWER_CON 0x2c +#define PCIE_CLKREQ_READY FIELD_PREP_WM16(BIT(0), 1) +#define PCIE_CLKREQ_NOT_READY FIELD_PREP_WM16(BIT(0), 0) +#define PCIE_CLKREQ_PULL_DOWN FIELD_PREP_WM16(GENMASK(13, 12), 1) + /* Hot Reset Control Register */ #define PCIE_CLIENT_HOT_RESET_CTRL 0x180 #define PCIE_LTSSM_APP_DLY2_EN BIT(1) @@ -82,9 +88,9 @@ struct rockchip_pcie { unsigned int clk_cnt; struct reset_control *rst; struct gpio_desc *rst_gpio; - struct regulator *vpcie3v3; struct irq_domain *irq_domain; const struct rockchip_pcie_of_data *data; + bool supports_clkreq; }; struct rockchip_pcie_of_data { @@ -200,6 +206,35 @@ static bool rockchip_pcie_link_up(struct dw_pcie *pci) return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP; } +/* + * See e.g. section '11.6.6.4 L1 Substate' in the RK3588 TRM V1.0 for the steps + * needed to support L1 substates. Currently, just enable L1 substates for RC + * mode if CLKREQ# is properly connected and supports-clkreq is present in DT. + * For EP mode, there are more things should be done to actually save power in + * L1 substates, so disable L1 substates until there is proper support. + */ +static void rockchip_pcie_configure_l1ss(struct dw_pcie *pci) +{ + struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); + + /* Enable L1 substates if CLKREQ# is properly connected */ + if (rockchip->supports_clkreq) { + rockchip_pcie_writel_apb(rockchip, PCIE_CLKREQ_READY, + PCIE_CLIENT_POWER_CON); + pci->l1ss_support = true; + return; + } + + /* + * Otherwise, assert CLKREQ# unconditionally. Since + * pci->l1ss_support is not set, the DWC core will prevent L1 + * Substates support from being advertised. 
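/*
 * Editorial sketch (not part of the patch): the generic opt-in pattern
 * that rockchip_pcie_configure_l1ss() instantiates. A glue driver that
 * knows CLKREQ# is wired sets pci->l1ss_support from its host init
 * callback, which runs before dw_pcie_setup_rc() calls
 * dw_pcie_hide_unsupported_l1ss(); otherwise the L1 PM Substates
 * capability is masked. The DT property name below mirrors the Rockchip
 * binding and is an assumption for other platforms.
 */
static int example_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* Advertise L1 substates only when the board wired CLKREQ# */
	pci->l1ss_support = of_property_read_bool(pci->dev->of_node,
						  "supports-clkreq");

	return 0;
}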
+ */ + rockchip_pcie_writel_apb(rockchip, + PCIE_CLKREQ_PULL_DOWN | PCIE_CLKREQ_NOT_READY, + PCIE_CLIENT_POWER_CON); +} + static void rockchip_pcie_enable_l0s(struct dw_pcie *pci) { u32 cap, lnkcap; @@ -264,6 +299,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp) irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler, rockchip); + rockchip_pcie_configure_l1ss(pci); rockchip_pcie_enable_l0s(pci); return 0; @@ -412,6 +448,9 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev, return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst), "failed to get reset lines\n"); + rockchip->supports_clkreq = of_property_read_bool(pdev->dev.of_node, + "supports-clkreq"); + return 0; } @@ -652,22 +691,15 @@ static int rockchip_pcie_probe(struct platform_device *pdev) return ret; /* DON'T MOVE ME: must be enable before PHY init */ - rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); - if (IS_ERR(rockchip->vpcie3v3)) { - if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV) - return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3), - "failed to get vpcie3v3 regulator\n"); - rockchip->vpcie3v3 = NULL; - } else { - ret = regulator_enable(rockchip->vpcie3v3); - if (ret) - return dev_err_probe(dev, ret, - "failed to enable vpcie3v3 regulator\n"); - } + ret = devm_regulator_get_enable_optional(dev, "vpcie3v3"); + if (ret < 0 && ret != -ENODEV) + return dev_err_probe(dev, ret, + "failed to enable vpcie3v3 regulator\n"); ret = rockchip_pcie_phy_init(rockchip); if (ret) - goto disable_regulator; + return dev_err_probe(dev, ret, + "failed to initialize the phy\n"); ret = reset_control_deassert(rockchip->rst); if (ret) @@ -700,9 +732,6 @@ deinit_clk: clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks); deinit_phy: rockchip_pcie_phy_deinit(rockchip); -disable_regulator: - if (rockchip->vpcie3v3) - regulator_disable(rockchip->vpcie3v3); return ret; } diff --git a/drivers/pci/controller/dwc/pcie-nxp-s32g.c b/drivers/pci/controller/dwc/pcie-nxp-s32g.c new file mode 100644 index 000000000000..47745749f75c --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-nxp-s32g.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe host controller driver for NXP S32G SoCs + * + * Copyright 2019-2025 NXP + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/sizes.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +/* PCIe controller Sub-System */ + +/* PCIe controller 0 General Control 1 */ +#define PCIE_S32G_PE0_GEN_CTRL_1 0x50 +#define DEVICE_TYPE_MASK GENMASK(3, 0) +#define SRIS_MODE BIT(8) + +/* PCIe controller 0 General Control 3 */ +#define PCIE_S32G_PE0_GEN_CTRL_3 0x58 +#define LTSSM_EN BIT(0) + +/* PCIe Controller 0 Interrupt Status */ +#define PCIE_S32G_PE0_INT_STS 0xE8 +#define HP_INT_STS BIT(6) + +/* Boundary between peripheral space and physical memory space */ +#define S32G_MEMORY_BOUNDARY_ADDR 0x80000000 + +struct s32g_pcie_port { + struct list_head list; + struct phy *phy; +}; + +struct s32g_pcie { + struct dw_pcie pci; + void __iomem *ctrl_base; + struct list_head ports; +}; + +#define to_s32g_from_dw_pcie(x) \ + container_of(x, struct s32g_pcie, pci) + +static void s32g_pcie_writel_ctrl(struct s32g_pcie *s32g_pp, u32 reg, u32 val) +{ + writel(val, s32g_pp->ctrl_base + reg); +} + +static u32 
s32g_pcie_readl_ctrl(struct s32g_pcie *s32g_pp, u32 reg) +{ + return readl(s32g_pp->ctrl_base + reg); +} + +static void s32g_pcie_enable_ltssm(struct s32g_pcie *s32g_pp) +{ + u32 reg; + + reg = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3); + reg |= LTSSM_EN; + s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3, reg); +} + +static void s32g_pcie_disable_ltssm(struct s32g_pcie *s32g_pp) +{ + u32 reg; + + reg = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3); + reg &= ~LTSSM_EN; + s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3, reg); +} + +static int s32g_pcie_start_link(struct dw_pcie *pci) +{ + struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci); + + s32g_pcie_enable_ltssm(s32g_pp); + + return 0; +} + +static void s32g_pcie_stop_link(struct dw_pcie *pci) +{ + struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci); + + s32g_pcie_disable_ltssm(s32g_pp); +} + +static struct dw_pcie_ops s32g_pcie_ops = { + .start_link = s32g_pcie_start_link, + .stop_link = s32g_pcie_stop_link, +}; + +/* Configure the AMBA AXI Coherency Extensions (ACE) interface */ +static void s32g_pcie_reset_mstr_ace(struct dw_pcie *pci) +{ + u32 ddr_base_low = lower_32_bits(S32G_MEMORY_BOUNDARY_ADDR); + u32 ddr_base_high = upper_32_bits(S32G_MEMORY_BOUNDARY_ADDR); + + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_3_OFF, 0x0); + + /* + * Ncore is a cache-coherent interconnect module that enables the + * integration of heterogeneous coherent and non-coherent agents in + * the chip. Ncore transactions to peripheral should be non-coherent + * or it might drop them. + * + * One example where this is needed are PCIe MSIs, which use NoSnoop=0 + * and might end up routed to Ncore. PCIe coherent traffic (e.g. MSIs) + * that targets peripheral space will be dropped by Ncore because + * peripherals on S32G are not coherent as slaves. We add a hard + * boundary in the PCIe controller coherency control registers to + * separate physical memory space from peripheral space. + * + * Define the start of DDR as seen by Linux as this boundary between + * "memory" and "peripherals", with peripherals being below. 
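/*
 * Editorial note (not part of the patch): worked values for the two
 * writes just below, given S32G_MEMORY_BOUNDARY_ADDR = 0x80000000:
 *
 *   COHERENCY_CONTROL_1_OFF = 0x80000000 & GENMASK(31, 2) = 0x80000000
 *                             (CFG_MEMTYPE_VALUE, bit 0, stays clear)
 *   COHERENCY_CONTROL_2_OFF = upper_32_bits(0x80000000)   = 0x0
 *
 * i.e. everything below 2 GiB is treated as non-coherent peripheral
 * space and everything at or above it as memory.
 */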
+ */ + dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_1_OFF, + (ddr_base_low & CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK)); + dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_2_OFF, ddr_base_high); + dw_pcie_dbi_ro_wr_dis(pci); +} + +static int s32g_init_pcie_controller(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci); + u32 val; + + /* Set RP mode */ + val = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_1); + val &= ~DEVICE_TYPE_MASK; + val |= FIELD_PREP(DEVICE_TYPE_MASK, PCI_EXP_TYPE_ROOT_PORT); + + /* Use default CRNS */ + val &= ~SRIS_MODE; + + s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_1, val); + + /* + * Make sure we use the coherency defaults (just in case the settings + * have been changed from their reset values) + */ + s32g_pcie_reset_mstr_ace(pci); + + dw_pcie_dbi_ro_wr_en(pci); + + val = dw_pcie_readl_dbi(pci, PCIE_PORT_FORCE); + val |= PORT_FORCE_DO_DESKEW_FOR_SRIS; + dw_pcie_writel_dbi(pci, PCIE_PORT_FORCE, val); + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val |= GEN3_RELATED_OFF_EQ_PHASE_2_3; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static const struct dw_pcie_host_ops s32g_pcie_host_ops = { + .init = s32g_init_pcie_controller, +}; + +static int s32g_init_pcie_phy(struct s32g_pcie *s32g_pp) +{ + struct dw_pcie *pci = &s32g_pp->pci; + struct device *dev = pci->dev; + struct s32g_pcie_port *port, *tmp; + int ret; + + list_for_each_entry(port, &s32g_pp->ports, list) { + ret = phy_init(port->phy); + if (ret) { + dev_err(dev, "Failed to init serdes PHY\n"); + goto err_phy_revert; + } + + ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, 0); + if (ret) { + dev_err(dev, "Failed to set mode on serdes PHY\n"); + goto err_phy_exit; + } + + ret = phy_power_on(port->phy); + if (ret) { + dev_err(dev, "Failed to power on serdes PHY\n"); + goto err_phy_exit; + } + } + + return 0; + +err_phy_exit: + phy_exit(port->phy); + +err_phy_revert: + list_for_each_entry_continue_reverse(port, &s32g_pp->ports, list) { + phy_power_off(port->phy); + phy_exit(port->phy); + } + + list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list) + list_del(&port->list); + + return ret; +} + +static void s32g_deinit_pcie_phy(struct s32g_pcie *s32g_pp) +{ + struct s32g_pcie_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list) { + phy_power_off(port->phy); + phy_exit(port->phy); + list_del(&port->list); + } +} + +static int s32g_pcie_init(struct device *dev, struct s32g_pcie *s32g_pp) +{ + s32g_pcie_disable_ltssm(s32g_pp); + + return s32g_init_pcie_phy(s32g_pp); +} + +static void s32g_pcie_deinit(struct s32g_pcie *s32g_pp) +{ + s32g_pcie_disable_ltssm(s32g_pp); + + s32g_deinit_pcie_phy(s32g_pp); +} + +static int s32g_pcie_parse_port(struct s32g_pcie *s32g_pp, struct device_node *node) +{ + struct device *dev = s32g_pp->pci.dev; + struct s32g_pcie_port *port; + int num_lanes; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->phy = devm_of_phy_get(dev, node, NULL); + if (IS_ERR(port->phy)) + return dev_err_probe(dev, PTR_ERR(port->phy), + "Failed to get serdes PHY\n"); + + INIT_LIST_HEAD(&port->list); + list_add_tail(&port->list, &s32g_pp->ports); + + /* + * The DWC core initialization code cannot yet parse the num-lanes + * attribute in the Root Port node. 
The S32G only supports one Root + * Port for now, so its driver can parse the node and set the num_lanes + * field of struct dw_pcie before calling dw_pcie_host_init(). + */ + if (!of_property_read_u32(node, "num-lanes", &num_lanes)) + s32g_pp->pci.num_lanes = num_lanes; + + return 0; +} + +static int s32g_pcie_parse_ports(struct device *dev, struct s32g_pcie *s32g_pp) +{ + struct s32g_pcie_port *port, *tmp; + int ret = -ENOENT; + + for_each_available_child_of_node_scoped(dev->of_node, of_port) { + if (!of_node_is_type(of_port, "pci")) + continue; + + ret = s32g_pcie_parse_port(s32g_pp, of_port); + if (ret) + goto err_port; + } + + return ret; + +err_port: + list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list) + list_del(&port->list); + + return ret; +} + +static int s32g_pcie_get_resources(struct platform_device *pdev, + struct s32g_pcie *s32g_pp) +{ + struct dw_pcie *pci = &s32g_pp->pci; + struct device *dev = &pdev->dev; + int ret; + + pci->dev = dev; + pci->ops = &s32g_pcie_ops; + + s32g_pp->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl"); + if (IS_ERR(s32g_pp->ctrl_base)) + return PTR_ERR(s32g_pp->ctrl_base); + + INIT_LIST_HEAD(&s32g_pp->ports); + + ret = s32g_pcie_parse_ports(dev, s32g_pp); + if (ret) + return dev_err_probe(dev, ret, + "Failed to parse Root Port\n"); + + platform_set_drvdata(pdev, s32g_pp); + + return 0; +} + +static int s32g_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct s32g_pcie *s32g_pp; + struct dw_pcie_rp *pp; + int ret; + + s32g_pp = devm_kzalloc(dev, sizeof(*s32g_pp), GFP_KERNEL); + if (!s32g_pp) + return -ENOMEM; + + ret = s32g_pcie_get_resources(pdev, s32g_pp); + if (ret) + return ret; + + pm_runtime_no_callbacks(dev); + devm_pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto err_pm_runtime_put; + + ret = s32g_pcie_init(dev, s32g_pp); + if (ret) + goto err_pm_runtime_put; + + pp = &s32g_pp->pci.pp; + pp->ops = &s32g_pcie_host_ops; + pp->use_atu_msg = true; + + ret = dw_pcie_host_init(pp); + if (ret) + goto err_pcie_deinit; + + return 0; + +err_pcie_deinit: + s32g_pcie_deinit(s32g_pp); +err_pm_runtime_put: + pm_runtime_put(dev); + + return ret; +} + +static int s32g_pcie_suspend_noirq(struct device *dev) +{ + struct s32g_pcie *s32g_pp = dev_get_drvdata(dev); + struct dw_pcie *pci = &s32g_pp->pci; + + return dw_pcie_suspend_noirq(pci); +} + +static int s32g_pcie_resume_noirq(struct device *dev) +{ + struct s32g_pcie *s32g_pp = dev_get_drvdata(dev); + struct dw_pcie *pci = &s32g_pp->pci; + + return dw_pcie_resume_noirq(pci); +} + +static const struct dev_pm_ops s32g_pcie_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(s32g_pcie_suspend_noirq, + s32g_pcie_resume_noirq) +}; + +static const struct of_device_id s32g_pcie_of_match[] = { + { .compatible = "nxp,s32g2-pcie" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, s32g_pcie_of_match); + +static struct platform_driver s32g_pcie_driver = { + .driver = { + .name = "s32g-pcie", + .of_match_table = s32g_pcie_of_match, + .suppress_bind_attrs = true, + .pm = pm_sleep_ptr(&s32g_pcie_pm_ops), + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .probe = s32g_pcie_probe, +}; + +builtin_platform_driver(s32g_pcie_driver); + +MODULE_AUTHOR("Ionut Vicovan <Ionut.Vicovan@nxp.com>"); +MODULE_DESCRIPTION("NXP S32G PCIe Host controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 805edbbfe7eb..7b92e7a1c0d9 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++
b/drivers/pci/controller/dwc/pcie-qcom.c @@ -55,7 +55,6 @@ #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 #define PARF_Q2A_FLUSH 0x1ac #define PARF_LTSSM 0x1b0 -#define PARF_SLV_DBI_ELBI 0x1b4 #define PARF_INT_ALL_STATUS 0x224 #define PARF_INT_ALL_CLEAR 0x228 #define PARF_INT_ALL_MASK 0x22c @@ -65,16 +64,6 @@ #define PARF_DBI_BASE_ADDR_V2_HI 0x354 #define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358 #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c -#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360 -#define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364 -#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368 -#define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c -#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370 -#define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374 -#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378 -#define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c -#define PARF_ECAM_BASE 0x380 -#define PARF_ECAM_BASE_HI 0x384 #define PARF_NO_SNOOP_OVERRIDE 0x3d4 #define PARF_ATU_BASE_ADDR 0x634 #define PARF_ATU_BASE_ADDR_HI 0x638 @@ -98,7 +87,6 @@ /* PARF_SYS_CTRL register fields */ #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29) -#define PCIE_ECAM_BLOCKER_EN BIT(26) #define MST_WAKEUP_EN BIT(13) #define SLV_WAKEUP_EN BIT(12) #define MSTR_ACLK_CGC_DIS BIT(10) @@ -146,9 +134,6 @@ /* PARF_LTSSM register fields */ #define LTSSM_EN BIT(8) -/* PARF_SLV_DBI_ELBI */ -#define SLV_DBI_ELBI_ADDR_BASE GENMASK(11, 0) - /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ #define PARF_INT_ALL_LINK_UP BIT(13) #define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23) @@ -262,6 +247,7 @@ struct qcom_pcie_ops { int (*get_resources)(struct qcom_pcie *pcie); int (*init)(struct qcom_pcie *pcie); int (*post_init)(struct qcom_pcie *pcie); + void (*host_post_init)(struct qcom_pcie *pcie); void (*deinit)(struct qcom_pcie *pcie); void (*ltssm_enable)(struct qcom_pcie *pcie); int (*config_sid)(struct qcom_pcie *pcie); @@ -326,47 +312,6 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) qcom_perst_assert(pcie, false); } -static void qcom_pci_config_ecam(struct dw_pcie_rp *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct qcom_pcie *pcie = to_qcom_pcie(pci); - u64 addr, addr_end; - u32 val; - - writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE); - writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI); - - /* - * The only device on the root bus is a single Root Port. If we try to - * access any devices other than Device/Function 00.0 on Bus 0, the TLP - * will go outside of the controller to the PCI bus. But with CFG Shift - * Feature (ECAM) enabled in iATU, there is no guarantee that the - * response is going to be all F's. Hence, to make sure that the - * requester gets all F's response for accesses other than the Root - * Port, configure iATU to block the transactions starting from - * function 1 of the root bus to the end of the root bus (i.e., from - * dbi_base + 4KB to dbi_base + 1MB). 
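The 4 KB and 1 MB bounds in the comment above fall straight out of the standard ECAM address encoding. A short sketch of the math, for illustration:

	/* ECAM config offset: bus[27:20] | device[19:15] | function[14:12] | reg[11:0] */
	static u32 ecam_offset(u32 bus, u32 dev, u32 fn)
	{
		return (bus << 20) | (dev << 15) | (fn << 12);
	}

	/*
	 * Bus 0 occupies offsets 0x00000..0xfffff (1 MB). The Root Port at
	 * 00.0 owns just the first 4 KB, so the blocked range begins at
	 * ecam_offset(0, 0, 1) == SZ_4K and runs through SZ_1M - 1 --
	 * exactly the dbi_base + 4KB .. dbi_base + 1MB window above.
	 */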
- */ - addr = pci->dbi_phys_addr + SZ_4K; - writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE); - writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI); - - writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE); - writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI); - - addr_end = pci->dbi_phys_addr + SZ_1M - 1; - - writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT); - writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI); - - writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT); - writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI); - - val = readl_relaxed(pcie->parf + PARF_SYS_CTRL); - val |= PCIE_ECAM_BLOCKER_EN; - writel_relaxed(val, pcie->parf + PARF_SYS_CTRL); -} - static int qcom_pcie_start_link(struct dw_pcie *pci) { struct qcom_pcie *pcie = to_qcom_pcie(pci); @@ -696,6 +641,18 @@ static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) return 0; } +static int qcom_pcie_assert_perst(struct dw_pcie *pci, bool assert) +{ + struct qcom_pcie *pcie = to_qcom_pcie(pci); + + if (assert) + qcom_ep_reset_assert(pcie); + else + qcom_ep_reset_deassert(pcie); + + return 0; +} + static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) { u32 val; @@ -1067,6 +1024,8 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) val &= ~REQ_NOT_ENTR_L1; writel(val, pcie->parf + PARF_PM_CTRL); + pci->l1ss_support = true; + val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); val |= EN; writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); @@ -1094,6 +1053,25 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) return 0; } +static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) +{ + /* + * Downstream devices need to be in D0 state before enabling PCI PM + * substates. + */ + pci_set_power_state_locked(pdev, PCI_D0); + pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); + + return 0; +} + +static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie) +{ + struct dw_pcie_rp *pp = &pcie->pci->pp; + + pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL); +} + static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; @@ -1320,7 +1298,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct qcom_pcie *pcie = to_qcom_pcie(pci); - u16 offset; int ret; qcom_ep_reset_assert(pcie); @@ -1329,17 +1306,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) if (ret) return ret; - if (pp->ecam_enabled) { - /* - * Override ELBI when ECAM is enabled, as when ECAM is enabled, - * ELBI moves under the 'config' space. 
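One detail of the new ASPM hook above that is easy to miss: pci_walk_bus() visits every device below the given bus and aborts the walk as soon as the callback returns non-zero, which is why qcom_pcie_enable_aspm() unconditionally returns 0. A small sketch of the same contract, with a hypothetical callback:

	static int count_active_devices(struct pci_dev *pdev, void *userdata)
	{
		int *count = userdata;

		if (pdev->current_state == PCI_D0)
			(*count)++;

		return 0;	/* returning non-zero would stop the walk early */
	}

	/* usage: int n = 0; pci_walk_bus(pp->bridge->bus, count_active_devices, &n); */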
- */ - offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI)); - pci->elbi_base = pci->dbi_base + offset; - - qcom_pci_config_ecam(pp); - } - ret = qcom_pcie_phy_power_on(pcie); if (ret) goto err_deinit; @@ -1380,9 +1346,19 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp) pcie->cfg->ops->deinit(pcie); } +static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct qcom_pcie *pcie = to_qcom_pcie(pci); + + if (pcie->cfg->ops->host_post_init) + pcie->cfg->ops->host_post_init(pcie); +} + static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { .init = qcom_pcie_host_init, .deinit = qcom_pcie_host_deinit, + .post_init = qcom_pcie_host_post_init, }; /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ @@ -1444,6 +1420,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = { .get_resources = qcom_pcie_get_resources_2_7_0, .init = qcom_pcie_init_2_7_0, .post_init = qcom_pcie_post_init_2_7_0, + .host_post_init = qcom_pcie_host_post_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, .config_sid = qcom_pcie_config_sid_1_9_0, @@ -1454,6 +1431,7 @@ static const struct qcom_pcie_ops ops_1_21_0 = { .get_resources = qcom_pcie_get_resources_2_7_0, .init = qcom_pcie_init_2_7_0, .post_init = qcom_pcie_post_init_2_7_0, + .host_post_init = qcom_pcie_host_post_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; @@ -1516,6 +1494,7 @@ static const struct qcom_pcie_cfg cfg_fw_managed = { static const struct dw_pcie_ops dw_pcie_ops = { .link_up = qcom_pcie_link_up, .start_link = qcom_pcie_start_link, + .assert_perst = qcom_pcie_assert_perst, }; static int qcom_pcie_icc_init(struct qcom_pcie *pcie) @@ -1565,6 +1544,7 @@ static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie) { u32 offset, status, width, speed; struct dw_pcie *pci = pcie->pci; + struct dev_pm_opp_key key = {}; unsigned long freq_kbps; struct dev_pm_opp *opp; int ret, freq_mbps; @@ -1592,8 +1572,20 @@ static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie) return; freq_kbps = freq_mbps * KILO; - opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width, - true); + opp = dev_pm_opp_find_level_exact(pci->dev, speed); + if (IS_ERR(opp)) { + /* opp-level is not defined; fall back to frequency only */ + opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width, + true); + } else { + /* drop the reference taken by the level-only lookup */ + dev_pm_opp_put(opp); + + key.freq = freq_kbps * width; + key.level = speed; + key.bw = 0; + opp = dev_pm_opp_find_key_exact(pci->dev, &key, true); + } if (!IS_ERR(opp)) { ret = dev_pm_opp_set_opp(pci->dev, opp); if (ret) diff --git a/drivers/pci/controller/dwc/pcie-spacemit-k1.c b/drivers/pci/controller/dwc/pcie-spacemit-k1.c new file mode 100644 index 000000000000..be20a520255b --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-spacemit-k1.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SpacemiT K1 PCIe host driver + * + * Copyright (C) 2025 by RISCstar Solutions Corporation. All rights reserved. + * Copyright (c) 2023, spacemit Corporation.
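Before moving into the new SpacemiT driver, one note on the qcom OPP key above: the key frequency is plain arithmetic on the per-lane rate and the link width. A worked example for an 8 GT/s x2 link (round per-lane figure assumed purely for illustration; whether encoding overhead is folded into freq_mbps depends on the helper that produced it earlier in the function):

	/*
	 * Assume freq_mbps = 8000 for an 8 GT/s lane:
	 *
	 *	freq_kbps = freq_mbps * KILO     =  8,000,000
	 *	key.freq  = freq_kbps * width    = 16,000,000  (x2)
	 *	key.level = speed                =  3 (Gen3, from LNKSTA)
	 *	key.bw    = 0                    (not used in this lookup)
	 */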
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gfp.h> +#include <linux/mfd/syscon.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +#define PCI_VENDOR_ID_SPACEMIT 0x201f +#define PCI_DEVICE_ID_SPACEMIT_K1 0x0001 + +/* Offsets and field definitions for link management registers */ +#define K1_PHY_AHB_IRQ_EN 0x0000 +#define PCIE_INTERRUPT_EN BIT(0) + +#define K1_PHY_AHB_LINK_STS 0x0004 +#define SMLH_LINK_UP BIT(1) +#define RDLH_LINK_UP BIT(12) + +#define INTR_ENABLE 0x0014 +#define MSI_CTRL_INT BIT(11) + +/* Some controls require APMU regmap access */ +#define SYSCON_APMU "spacemit,apmu" + +/* Offsets and field definitions for APMU registers */ +#define PCIE_CLK_RESET_CONTROL 0x0000 +#define LTSSM_EN BIT(6) +#define PCIE_AUX_PWR_DET BIT(9) +#define PCIE_RC_PERST BIT(12) /* 1: assert PERST# */ +#define APP_HOLD_PHY_RST BIT(30) +#define DEVICE_TYPE_RC BIT(31) /* 0: endpoint; 1: RC */ + +#define PCIE_CONTROL_LOGIC 0x0004 +#define PCIE_SOFT_RESET BIT(0) + +struct k1_pcie { + struct dw_pcie pci; + struct phy *phy; + void __iomem *link; + struct regmap *pmu; /* Errors ignored; MMIO-backed regmap */ + u32 pmu_off; +}; + +#define to_k1_pcie(dw_pcie) \ + platform_get_drvdata(to_platform_device((dw_pcie)->dev)) + +static void k1_pcie_toggle_soft_reset(struct k1_pcie *k1) +{ + u32 offset; + u32 val; + + /* + * Write, then read back to guarantee it has reached the device + * before we start the delay. + */ + offset = k1->pmu_off + PCIE_CONTROL_LOGIC; + regmap_set_bits(k1->pmu, offset, PCIE_SOFT_RESET); + regmap_read(k1->pmu, offset, &val); + + mdelay(2); + + regmap_clear_bits(k1->pmu, offset, PCIE_SOFT_RESET); +} + +/* Enable app clocks, deassert resets */ +static int k1_pcie_enable_resources(struct k1_pcie *k1) +{ + struct dw_pcie *pci = &k1->pci; + int ret; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(pci->app_clks), pci->app_clks); + if (ret) + return ret; + + ret = reset_control_bulk_deassert(ARRAY_SIZE(pci->app_rsts), + pci->app_rsts); + if (ret) + goto err_disable_clks; + + return 0; + +err_disable_clks: + clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks); + + return ret; +} + +/* Assert resets, disable app clocks */ +static void k1_pcie_disable_resources(struct k1_pcie *k1) +{ + struct dw_pcie *pci = &k1->pci; + + reset_control_bulk_assert(ARRAY_SIZE(pci->app_rsts), pci->app_rsts); + clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks); +} + +/* FIXME: Disable ASPM L1 to avoid errors reported on some NVMe drives */ +static void k1_pcie_disable_aspm_l1(struct k1_pcie *k1) +{ + struct dw_pcie *pci = &k1->pci; + u8 offset; + u32 val; + + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + offset += PCI_EXP_LNKCAP; + + dw_pcie_dbi_ro_wr_en(pci); + val = dw_pcie_readl_dbi(pci, offset); + val &= ~PCI_EXP_LNKCAP_ASPM_L1; + dw_pcie_writel_dbi(pci, offset, val); + dw_pcie_dbi_ro_wr_dis(pci); +} + +static int k1_pcie_init(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct k1_pcie *k1 = to_k1_pcie(pci); + u32 reset_ctrl; + u32 val; + int ret; + + k1_pcie_toggle_soft_reset(k1); + + ret = k1_pcie_enable_resources(k1); + if (ret) + return ret; + + /* Set the PCI vendor and device ID */ + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, PCI_VENDOR_ID_SPACEMIT); +
dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, PCI_DEVICE_ID_SPACEMIT_K1); + dw_pcie_dbi_ro_wr_dis(pci); + + /* + * Start by asserting fundamental reset (drive PERST# low). The + * PCI CEM spec says that PERST# should be deasserted at least + * 100ms after the power becomes stable, so we'll insert that + * delay first. Write, then read it back to guarantee the write + * reaches the device before we start the delay. + */ + reset_ctrl = k1->pmu_off + PCIE_CLK_RESET_CONTROL; + regmap_set_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST); + regmap_read(k1->pmu, reset_ctrl, &val); + mdelay(PCIE_T_PVPERL_MS); + + /* + * Put the controller in root complex mode, and indicate that + * Vaux (3.3v) is present. + */ + regmap_set_bits(k1->pmu, reset_ctrl, DEVICE_TYPE_RC | PCIE_AUX_PWR_DET); + + ret = phy_init(k1->phy); + if (ret) { + k1_pcie_disable_resources(k1); + + return ret; + } + + /* Deassert fundamental reset (drive PERST# high) */ + regmap_clear_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST); + + /* Finally, as a workaround, disable ASPM L1 */ + k1_pcie_disable_aspm_l1(k1); + + return 0; +} + +static void k1_pcie_deinit(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct k1_pcie *k1 = to_k1_pcie(pci); + + /* Assert fundamental reset (drive PERST# low) */ + regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL, + PCIE_RC_PERST); + + phy_exit(k1->phy); + + k1_pcie_disable_resources(k1); +} + +static const struct dw_pcie_host_ops k1_pcie_host_ops = { + .init = k1_pcie_init, + .deinit = k1_pcie_deinit, +}; + +static bool k1_pcie_link_up(struct dw_pcie *pci) +{ + struct k1_pcie *k1 = to_k1_pcie(pci); + u32 val; + + val = readl_relaxed(k1->link + K1_PHY_AHB_LINK_STS); + + return (val & RDLH_LINK_UP) && (val & SMLH_LINK_UP); +} + +static int k1_pcie_start_link(struct dw_pcie *pci) +{ + struct k1_pcie *k1 = to_k1_pcie(pci); + u32 val; + + /* Stop holding the PHY in reset, and enable link training */ + regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL, + APP_HOLD_PHY_RST | LTSSM_EN, LTSSM_EN); + + /* Enable the MSI interrupt */ + writel_relaxed(MSI_CTRL_INT, k1->link + INTR_ENABLE); + + /* Top-level interrupt enable */ + val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN); + val |= PCIE_INTERRUPT_EN; + writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN); + + return 0; +} + +static void k1_pcie_stop_link(struct dw_pcie *pci) +{ + struct k1_pcie *k1 = to_k1_pcie(pci); + u32 val; + + /* Disable interrupts */ + val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN); + val &= ~PCIE_INTERRUPT_EN; + writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN); + + writel_relaxed(0, k1->link + INTR_ENABLE); + + /* Disable the link and hold the PHY in reset */ + regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL, + APP_HOLD_PHY_RST | LTSSM_EN, APP_HOLD_PHY_RST); +} + +static const struct dw_pcie_ops k1_pcie_ops = { + .link_up = k1_pcie_link_up, + .start_link = k1_pcie_start_link, + .stop_link = k1_pcie_stop_link, +}; + +static int k1_pcie_parse_port(struct k1_pcie *k1) +{ + struct device *dev = k1->pci.dev; + struct device_node *root_port; + struct phy *phy; + + /* We assume only one root port */ + root_port = of_get_next_available_child(dev_of_node(dev), NULL); + if (!root_port) + return -EINVAL; + + phy = devm_of_phy_get(dev, root_port, NULL); + + of_node_put(root_port); + + if (IS_ERR(phy)) + return PTR_ERR(phy); + + k1->phy = phy; + + return 0; +} + +static int k1_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct k1_pcie *k1; + int ret; + + k1 = 
devm_kzalloc(dev, sizeof(*k1), GFP_KERNEL); + if (!k1) + return -ENOMEM; + + k1->pmu = syscon_regmap_lookup_by_phandle_args(dev_of_node(dev), + SYSCON_APMU, 1, + &k1->pmu_off); + if (IS_ERR(k1->pmu)) + return dev_err_probe(dev, PTR_ERR(k1->pmu), + "failed to lookup PMU registers\n"); + + k1->link = devm_platform_ioremap_resource_byname(pdev, "link"); + if (IS_ERR(k1->link)) + return dev_err_probe(dev, PTR_ERR(k1->link), + "failed to map \"link\" registers\n"); + + k1->pci.dev = dev; + k1->pci.ops = &k1_pcie_ops; + k1->pci.pp.num_vectors = MAX_MSI_IRQS; + dw_pcie_cap_set(&k1->pci, REQ_RES); + + k1->pci.pp.ops = &k1_pcie_host_ops; + + /* Hold the PHY in reset until we start the link */ + regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL, + APP_HOLD_PHY_RST); + + ret = devm_regulator_get_enable(dev, "vpcie3v3"); + if (ret) + return dev_err_probe(dev, ret, + "failed to get \"vpcie3v3\" supply\n"); + + pm_runtime_set_active(dev); + pm_runtime_no_callbacks(dev); + devm_pm_runtime_enable(dev); + + platform_set_drvdata(pdev, k1); + + ret = k1_pcie_parse_port(k1); + if (ret) + return dev_err_probe(dev, ret, "failed to parse root port\n"); + + ret = dw_pcie_host_init(&k1->pci.pp); + if (ret) + return dev_err_probe(dev, ret, "failed to initialize host\n"); + + return 0; +} + +static void k1_pcie_remove(struct platform_device *pdev) +{ + struct k1_pcie *k1 = platform_get_drvdata(pdev); + + dw_pcie_host_deinit(&k1->pci.pp); +} + +static const struct of_device_id k1_pcie_of_match_table[] = { + { .compatible = "spacemit,k1-pcie", }, + { } +}; + +static struct platform_driver k1_pcie_driver = { + .probe = k1_pcie_probe, + .remove = k1_pcie_remove, + .driver = { + .name = "spacemit-k1-pcie", + .of_match_table = k1_pcie_of_match_table, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(k1_pcie_driver); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SpacemiT K1 PCIe host driver"); diff --git a/drivers/pci/controller/dwc/pcie-stm32-ep.c b/drivers/pci/controller/dwc/pcie-stm32-ep.c index 3400c7cd2d88..2cecf32d2b0f 100644 --- a/drivers/pci/controller/dwc/pcie-stm32-ep.c +++ b/drivers/pci/controller/dwc/pcie-stm32-ep.c @@ -7,9 +7,9 @@ */ #include <linux/clk.h> +#include <linux/gpio/consumer.h> #include <linux/mfd/syscon.h> #include <linux/of_platform.h> -#include <linux/of_gpio.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -37,36 +37,9 @@ static void stm32_pcie_ep_init(struct dw_pcie_ep *ep) dw_pcie_ep_reset_bar(pci, bar); } -static int stm32_pcie_enable_link(struct dw_pcie *pci) -{ - struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci); - - regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR, - STM32MP25_PCIECR_LTSSM_EN, - STM32MP25_PCIECR_LTSSM_EN); - - return dw_pcie_wait_for_link(pci); -} - -static void stm32_pcie_disable_link(struct dw_pcie *pci) -{ - struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci); - - regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR, STM32MP25_PCIECR_LTSSM_EN, 0); -} - static int stm32_pcie_start_link(struct dw_pcie *pci) { struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci); - int ret; - - dev_dbg(pci->dev, "Enable link\n"); - - ret = stm32_pcie_enable_link(pci); - if (ret) { - dev_err(pci->dev, "PCIe cannot establish link: %d\n", ret); - return ret; - } enable_irq(stm32_pcie->perst_irq); @@ -77,11 +50,7 @@ static void stm32_pcie_stop_link(struct dw_pcie *pci) { struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci); - dev_dbg(pci->dev, "Disable link\n"); - disable_irq(stm32_pcie->perst_irq); - - 
stm32_pcie_disable_link(pci); } static int stm32_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, @@ -152,6 +121,9 @@ static void stm32_pcie_perst_assert(struct dw_pcie *pci) dev_dbg(dev, "PERST asserted by host\n"); + regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR, + STM32MP25_PCIECR_LTSSM_EN, 0); + pci_epc_deinit_notify(ep->epc); stm32_pcie_disable_resources(stm32_pcie); @@ -192,6 +164,11 @@ static void stm32_pcie_perst_deassert(struct dw_pcie *pci) pci_epc_init_notify(ep->epc); + /* Enable link training */ + regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR, + STM32MP25_PCIECR_LTSSM_EN, + STM32MP25_PCIECR_LTSSM_EN); + return; err_disable_resources: @@ -237,6 +214,8 @@ static int stm32_add_pcie_ep(struct stm32_pcie *stm32_pcie, ep->ops = &stm32_pcie_ep_ops; + ep->page_size = stm32_pcie_epc_features.align; + ret = dw_pcie_ep_init(ep); if (ret) { dev_err(dev, "Failed to initialize ep: %d\n", ret); diff --git a/drivers/pci/controller/dwc/pcie-stm32.c b/drivers/pci/controller/dwc/pcie-stm32.c index 96a5fb893af4..a9e77478443b 100644 --- a/drivers/pci/controller/dwc/pcie-stm32.c +++ b/drivers/pci/controller/dwc/pcie-stm32.c @@ -7,18 +7,30 @@ */ #include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/irq.h> #include <linux/mfd/syscon.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> +#include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> #include <linux/regmap.h> #include <linux/reset.h> +#include <linux/stddef.h> + +#include "../../pci.h" + #include "pcie-designware.h" #include "pcie-stm32.h" -#include "../../pci.h" struct stm32_pcie { struct dw_pcie pci; diff --git a/drivers/pci/controller/dwc/pcie-stm32.h b/drivers/pci/controller/dwc/pcie-stm32.h index 09d39f04e469..419cf1ff669d 100644 --- a/drivers/pci/controller/dwc/pcie-stm32.h +++ b/drivers/pci/controller/dwc/pcie-stm32.h @@ -6,6 +6,9 @@ * Author: Christian Bruel <christian.bruel@foss.st.com> */ +#include <linux/bits.h> +#include <linux/device.h> + #define to_stm32_pcie(x) dev_get_drvdata((x)->dev) #define STM32MP25_PCIECR_TYPE_MASK GENMASK(11, 8) diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 10e74458e667..0ddeef70726d 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -260,7 +260,6 @@ struct tegra_pcie_dw { u32 msi_ctrl_int; u32 num_lanes; u32 cid; - u32 cfg_link_cap_l1sub; u32 ras_des_cap; u32 pcie_cap_base; u32 aspm_cmrt; @@ -475,8 +474,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) return IRQ_HANDLED; /* If EP doesn't advertise L1SS, just return */ - val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); - if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2))) + if (!pci->l1ss_support) return IRQ_HANDLED; /* Check if BME is set to '1' */ @@ -608,24 +606,6 @@ static struct pci_ops tegra_pci_ops = { }; #if defined(CONFIG_PCIEASPM) -static void disable_aspm_l11(struct tegra_pcie_dw *pcie) -{ - u32 val; - - val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub); - val &= ~PCI_L1SS_CAP_ASPM_L1_1; - dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); -} - -static void disable_aspm_l12(struct tegra_pcie_dw *pcie) -{ - u32 val; - - val = dw_pcie_readl_dbi(&pcie->pci, 
pcie->cfg_link_cap_l1sub); - val &= ~PCI_L1SS_CAP_ASPM_L1_2; - dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); -} - static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) { u32 val; @@ -682,10 +662,9 @@ static int aspm_state_cnt(struct seq_file *s, void *data) static void init_host_aspm(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; - u32 val; + u32 l1ss, val; - val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS); - pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP; + l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS); pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci, PCI_EXT_CAP_ID_VNDR); @@ -697,11 +676,14 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie) PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); /* Program T_cmrt and T_pwr_on values */ - val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); + val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP); val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE); val |= (pcie->aspm_cmrt << 8); val |= (pcie->aspm_pwr_on_t << 19); - dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val); + dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, val); + + if (pcie->supports_clkreq) + pci->l1ss_support = true; /* Program L0s and L1 entrance latencies */ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR); @@ -726,8 +708,6 @@ static void init_debugfs(struct tegra_pcie_dw *pcie) aspm_state_cnt); } #else -static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; } -static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; } static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; } #endif @@ -931,12 +911,6 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp) init_host_aspm(pcie); - /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ - if (!pcie->supports_clkreq) { - disable_aspm_l11(pcie); - disable_aspm_l12(pcie); - } - if (!pcie->of_data->has_l1ss_exit_fix) { val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; @@ -1871,12 +1845,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) init_host_aspm(pcie); - /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ - if (!pcie->supports_clkreq) { - disable_aspm_l11(pcie); - disable_aspm_l12(pcie); - } - if (!pcie->of_data->has_l1ss_exit_fix) { val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index 810d1c8de24e..c473e7c03bac 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -53,16 +53,12 @@ struct pci_config_window *pci_host_common_ecam_create(struct device *dev, EXPORT_SYMBOL_GPL(pci_host_common_ecam_create); int pci_host_common_init(struct platform_device *pdev, + struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops) { struct device *dev = &pdev->dev; - struct pci_host_bridge *bridge; struct pci_config_window *cfg; - bridge = devm_pci_alloc_host_bridge(dev, 0); - if (!bridge) - return -ENOMEM; - of_pci_check_probe_only(); platform_set_drvdata(pdev, bridge); @@ -85,12 +81,17 @@ EXPORT_SYMBOL_GPL(pci_host_common_init); int pci_host_common_probe(struct platform_device *pdev) { const struct pci_ecam_ops *ops; + struct pci_host_bridge *bridge; ops = of_device_get_match_data(&pdev->dev); if (!ops) return -ENODEV; - return 
pci_host_common_init(pdev, ops); + bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0); + if (!bridge) + return -ENOMEM; + + return pci_host_common_init(pdev, bridge, ops); } EXPORT_SYMBOL_GPL(pci_host_common_probe); diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h index 51c35ec0cf37..b5075d4bd7eb 100644 --- a/drivers/pci/controller/pci-host-common.h +++ b/drivers/pci/controller/pci-host-common.h @@ -14,6 +14,7 @@ struct pci_ecam_ops; int pci_host_common_probe(struct platform_device *pdev); int pci_host_common_init(struct platform_device *pdev, + struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops); void pci_host_common_remove(struct platform_device *pdev); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 146b43981b27..1e237d3538f9 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -3696,48 +3696,6 @@ static int hv_send_resources_released(struct hv_device *hdev) return 0; } -#define HVPCI_DOM_MAP_SIZE (64 * 1024) -static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE); - -/* - * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0 - * as invalid for passthrough PCI devices of this driver. - */ -#define HVPCI_DOM_INVALID 0 - -/** - * hv_get_dom_num() - Get a valid PCI domain number - * Check if the PCI domain number is in use, and return another number if - * it is in use. - * - * @dom: Requested domain number - * - * return: domain number on success, HVPCI_DOM_INVALID on failure - */ -static u16 hv_get_dom_num(u16 dom) -{ - unsigned int i; - - if (test_and_set_bit(dom, hvpci_dom_map) == 0) - return dom; - - for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) { - if (test_and_set_bit(i, hvpci_dom_map) == 0) - return i; - } - - return HVPCI_DOM_INVALID; -} - -/** - * hv_put_dom_num() - Mark the PCI domain number as free - * @dom: Domain number to be freed - */ -static void hv_put_dom_num(u16 dom) -{ - clear_bit(dom, hvpci_dom_map); -} - /** * hv_pci_probe() - New VMBus channel probe, for a root PCI bus * @hdev: VMBus's tracking struct for this root PCI bus @@ -3750,9 +3708,9 @@ static int hv_pci_probe(struct hv_device *hdev, { struct pci_host_bridge *bridge; struct hv_pcibus_device *hbus; - u16 dom_req, dom; + int ret, dom; + u16 dom_req; char *name; - int ret; bridge = devm_pci_alloc_host_bridge(&hdev->device, 0); if (!bridge) @@ -3779,11 +3737,14 @@ static int hv_pci_probe(struct hv_device *hdev, * PCI bus (which is actually emulated by the hypervisor) is domain 0. * (2) There will be no overlap between domains (after fixing possible * collisions) in the same VM. + * + * Because Gen1 VMs use domain 0, don't allow picking domain 0 here, + * even if bytes 4 and 5 of the instance GUID are both zero. For wider + * userspace compatibility, limit the domain ID to a 16-bit value. 
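To ground the comment above in the code that follows: the requested domain comes from two instance-GUID bytes, and the allocator is constrained to a non-zero 16-bit range. A sketch using pci_bus_find_emul_domain_nr() exactly as the hunk below does:

	/*
	 * Instance GUID bytes 4 and 5 seed the request, e.g.
	 * b[4] = 0x34, b[5] = 0x12  =>  dom_req = 0x1234.
	 */
	u16 dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];

	/*
	 * Allocate in [1, U16_MAX]: domain 0 stays reserved for the Gen1
	 * emulated bus, and staying within 16 bits keeps the familiar
	 * DDDD:BB:DD.F form intact for userspace.
	 */
	int dom = pci_bus_find_emul_domain_nr(dom_req, 1, U16_MAX);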
*/ dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4]; - dom = hv_get_dom_num(dom_req); - - if (dom == HVPCI_DOM_INVALID) { + dom = pci_bus_find_emul_domain_nr(dom_req, 1, U16_MAX); + if (dom < 0) { dev_err(&hdev->device, "Unable to use dom# 0x%x or other numbers", dom_req); ret = -EINVAL; @@ -3917,7 +3878,7 @@ close: destroy_wq: destroy_workqueue(hbus->wq); free_dom: - hv_put_dom_num(hbus->bridge->domain_nr); + pci_bus_release_emul_domain_nr(hbus->bridge->domain_nr); free_bus: kfree(hbus); return ret; @@ -4042,8 +4003,6 @@ static void hv_pci_remove(struct hv_device *hdev) irq_domain_remove(hbus->irq_domain); irq_domain_free_fwnode(hbus->fwnode); - hv_put_dom_num(hbus->bridge->domain_nr); - kfree(hbus); } @@ -4217,9 +4176,6 @@ static int __init init_hv_pci_drv(void) if (ret) return ret; - /* Set the invalid domain number's bit, so it will not be used */ - set_bit(HVPCI_DOM_INVALID, hvpci_dom_map); - /* Initialize PCI block r/w interface */ hvpci_block_ops.read_block = hv_read_config_block; hvpci_block_ops.write_block = hv_write_config_block; diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c index acb85e0d5675..9fd401838bad 100644 --- a/drivers/pci/controller/pci-ixp4xx.c +++ b/drivers/pci/controller/pci-ixp4xx.c @@ -214,6 +214,7 @@ static u32 ixp4xx_crp_byte_lane_enable_bits(u32 n, int size) return 0xffffffff; } +#ifdef CONFIG_ARM static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size, u32 *value) { @@ -251,6 +252,7 @@ static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size, return PCIBIOS_SUCCESSFUL; } +#endif static int ixp4xx_crp_write_config(struct ixp4xx_pci *p, int where, int size, u32 value) @@ -470,6 +472,7 @@ static int ixp4xx_pci_parse_map_dma_ranges(struct ixp4xx_pci *p) return 0; } +#ifdef CONFIG_ARM /* Only used to get context for abort handling */ static struct ixp4xx_pci *ixp4xx_pci_abort_singleton; @@ -509,6 +512,7 @@ static int ixp4xx_pci_abort_handler(unsigned long addr, unsigned int fsr, return 0; } +#endif static int __init ixp4xx_pci_probe(struct platform_device *pdev) { @@ -555,10 +559,12 @@ static int __init ixp4xx_pci_probe(struct platform_device *pdev) dev_info(dev, "controller is in %s mode\n", p->host_mode ? 
"host" : "option"); +#ifdef CONFIG_ARM /* Hook in our fault handler for PCI errors */ ixp4xx_pci_abort_singleton = p; hook_fault_code(16+6, ixp4xx_pci_abort_handler, SIGBUS, 0, "imprecise external abort"); +#endif ret = ixp4xx_pci_parse_map_ranges(p); if (ret) diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 0380d300adca..2d92fc79f6dd 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -187,7 +187,6 @@ struct apple_pcie { const struct hw_info *hw; unsigned long *bitmap; struct list_head ports; - struct list_head entry; struct completion event; struct irq_fwspec fwspec; u32 nvecs; @@ -206,9 +205,6 @@ struct apple_pcie_port { int idx; }; -static LIST_HEAD(pcie_list); -static DEFINE_MUTEX(pcie_list_lock); - static void rmw_set(u32 set, void __iomem *addr) { writel_relaxed(readl_relaxed(addr) | set, addr); @@ -724,32 +720,9 @@ static int apple_msi_init(struct apple_pcie *pcie) return 0; } -static void apple_pcie_register(struct apple_pcie *pcie) -{ - guard(mutex)(&pcie_list_lock); - - list_add_tail(&pcie->entry, &pcie_list); -} - -static void apple_pcie_unregister(struct apple_pcie *pcie) -{ - guard(mutex)(&pcie_list_lock); - - list_del(&pcie->entry); -} - static struct apple_pcie *apple_pcie_lookup(struct device *dev) { - struct apple_pcie *pcie; - - guard(mutex)(&pcie_list_lock); - - list_for_each_entry(pcie, &pcie_list, entry) { - if (pcie->dev == dev) - return pcie; - } - - return NULL; + return pci_host_bridge_priv(dev_get_drvdata(dev)); } static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev) @@ -875,13 +848,15 @@ static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = { static int apple_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct pci_host_bridge *bridge; struct apple_pcie *pcie; int ret; - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); + if (!bridge) return -ENOMEM; + pcie = pci_host_bridge_priv(bridge); pcie->dev = dev; pcie->hw = of_device_get_match_data(dev); if (!pcie->hw) @@ -897,13 +872,7 @@ static int apple_pcie_probe(struct platform_device *pdev) if (ret) return ret; - apple_pcie_register(pcie); - - ret = pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops); - if (ret) - apple_pcie_unregister(pcie); - - return ret; + return pci_host_common_init(pdev, bridge, &apple_pcie_cfg_ecam_ops); } static const struct of_device_id apple_pcie_of_match[] = { diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 9afbd02ded35..062f55690012 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -14,15 +14,18 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> +#include <linux/kdebug.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/msi.h> +#include <linux/notifier.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/of_platform.h> +#include <linux/panic_notifier.h> #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/printk.h> @@ -30,7 +33,9 @@ #include <linux/reset.h> #include <linux/sizes.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <linux/types.h> #include "../pci.h" @@ -48,7 +53,6 @@ #define 
PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0 -#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00 #define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8 #define PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK 0xf8 @@ -155,8 +159,40 @@ #define MSI_INT_MASK_SET 0x10 #define MSI_INT_MASK_CLR 0x14 +/* Error report registers */ +#define PCIE_OUTB_ERR_TREAT 0x6000 +#define PCIE_OUTB_ERR_TREAT_CONFIG 0x1 +#define PCIE_OUTB_ERR_TREAT_MEM 0x2 +#define PCIE_OUTB_ERR_VALID 0x6004 +#define PCIE_OUTB_ERR_CLEAR 0x6008 +#define PCIE_OUTB_ERR_ACC_INFO 0x600c +#define PCIE_OUTB_ERR_ACC_INFO_CFG_ERR BIT(0) +#define PCIE_OUTB_ERR_ACC_INFO_MEM_ERR BIT(1) +#define PCIE_OUTB_ERR_ACC_INFO_TYPE_64 BIT(2) +#define PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE BIT(4) +#define PCIE_OUTB_ERR_ACC_INFO_BYTE_LANES 0xff00 +#define PCIE_OUTB_ERR_ACC_ADDR 0x6010 +#define PCIE_OUTB_ERR_ACC_ADDR_BUS 0xff00000 +#define PCIE_OUTB_ERR_ACC_ADDR_DEV 0xf8000 +#define PCIE_OUTB_ERR_ACC_ADDR_FUNC 0x7000 +#define PCIE_OUTB_ERR_ACC_ADDR_REG 0xfff +#define PCIE_OUTB_ERR_CFG_CAUSE 0x6014 +#define PCIE_OUTB_ERR_CFG_CAUSE_TIMEOUT BIT(6) +#define PCIE_OUTB_ERR_CFG_CAUSE_ABORT BIT(5) +#define PCIE_OUTB_ERR_CFG_CAUSE_UNSUPP_REQ BIT(4) +#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_TIMEOUT BIT(2) +#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_DISABLED BIT(1) +#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_64BIT BIT(0) +#define PCIE_OUTB_ERR_MEM_ADDR_LO 0x6018 +#define PCIE_OUTB_ERR_MEM_ADDR_HI 0x601c +#define PCIE_OUTB_ERR_MEM_CAUSE 0x6020 +#define PCIE_OUTB_ERR_MEM_CAUSE_TIMEOUT BIT(6) +#define PCIE_OUTB_ERR_MEM_CAUSE_ABORT BIT(5) +#define PCIE_OUTB_ERR_MEM_CAUSE_UNSUPP_REQ BIT(4) +#define PCIE_OUTB_ERR_MEM_CAUSE_ACC_DISABLED BIT(1) +#define PCIE_OUTB_ERR_MEM_CAUSE_BAD_ADDR BIT(0) + #define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1 -#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0 #define RGR1_SW_INIT_1_INIT_GENERIC_MASK 0x2 #define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT 0x1 @@ -259,6 +295,7 @@ struct pcie_cfg_data { int (*perst_set)(struct brcm_pcie *pcie, u32 val); int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); int (*post_setup)(struct brcm_pcie *pcie); + bool has_err_report; }; struct subdev_regulators { @@ -303,6 +340,10 @@ struct brcm_pcie { struct subdev_regulators *sr; bool ep_wakeup_capable; const struct pcie_cfg_data *cfg; + bool bridge_in_reset; + struct notifier_block die_notifier; + struct notifier_block panic_notifier; + spinlock_t bridge_lock; }; static inline bool is_bmips(const struct brcm_pcie *pcie) @@ -310,6 +351,24 @@ static inline bool is_bmips(const struct brcm_pcie *pcie) return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425; } +static int brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val) +{ + unsigned long flags; + int ret; + + if (pcie->cfg->has_err_report) + spin_lock_irqsave(&pcie->bridge_lock, flags); + + ret = pcie->cfg->bridge_sw_init_set(pcie, val); + /* If we fail, assume the bridge is in reset (off) */ + pcie->bridge_in_reset = ret ? 
true : val; + + if (pcie->cfg->has_err_report) + spin_unlock_irqrestore(&pcie->bridge_lock, flags); + + return ret; +} + /* * This is to convert the size of the inbound "BAR" region to the * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE @@ -1075,13 +1134,13 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) void __iomem *base = pcie->base; struct pci_host_bridge *bridge; struct resource_entry *entry; - u32 tmp, burst, aspm_support, num_lanes, num_lanes_cap; + u32 tmp, burst, num_lanes, num_lanes_cap; u8 num_out_wins = 0; int num_inbound_wins = 0; int memc, ret; /* Reset the bridge */ - ret = pcie->cfg->bridge_sw_init_set(pcie, 1); + ret = brcm_pcie_bridge_sw_init_set(pcie, 1); if (ret) return ret; @@ -1097,7 +1156,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) usleep_range(100, 200); /* Take the bridge out of reset */ - ret = pcie->cfg->bridge_sw_init_set(pcie, 0); + ret = brcm_pcie_bridge_sw_init_set(pcie, 0); if (ret) return ret; @@ -1175,12 +1234,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) /* Don't advertise L0s capability if 'aspm-no-l0s' */ - aspm_support = PCIE_LINK_STATE_L1; - if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) - aspm_support |= PCIE_LINK_STATE_L0S; tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); - u32p_replace_bits(&tmp, aspm_support, - PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); + if (of_property_read_bool(pcie->np, "aspm-no-l0s")) + tmp &= ~PCI_EXP_LNKCAP_ASPM_L0S; writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */ @@ -1565,7 +1621,7 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie) if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN)) /* Shutdown PCIe bridge */ - ret = pcie->cfg->bridge_sw_init_set(pcie, 1); + ret = brcm_pcie_bridge_sw_init_set(pcie, 1); return ret; } @@ -1653,7 +1709,9 @@ static int brcm_pcie_resume_noirq(struct device *dev) goto err_reset; /* Take bridge out of reset so we can access the SERDES reg */ - pcie->cfg->bridge_sw_init_set(pcie, 0); + ret = brcm_pcie_bridge_sw_init_set(pcie, 0); + if (ret) + goto err_reset; /* SERDES_IDDQ = 0 */ tmp = readl(base + HARD_DEBUG(pcie)); @@ -1707,6 +1765,119 @@ err_disable_clk: return ret; } +/* Dump out PCIe errors on die or panic */ +static int brcm_pcie_dump_err(struct brcm_pcie *pcie, + const char *type) +{ + void __iomem *base = pcie->base; + int i, is_cfg_err, is_mem_err, lanes; + const char *width_str, *direction_str; + u32 info, cfg_addr, cfg_cause, mem_cause, lo, hi; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + unsigned long flags; + char lanes_str[9]; + + spin_lock_irqsave(&pcie->bridge_lock, flags); + /* Don't access registers when the bridge is off */ + if (pcie->bridge_in_reset || readl(base + PCIE_OUTB_ERR_VALID) == 0) { + spin_unlock_irqrestore(&pcie->bridge_lock, flags); + return NOTIFY_DONE; + } + + /* Read all necessary registers so we can release the spinlock ASAP */ + info = readl(base + PCIE_OUTB_ERR_ACC_INFO); + is_cfg_err = !!(info & PCIE_OUTB_ERR_ACC_INFO_CFG_ERR); + is_mem_err = !!(info & PCIE_OUTB_ERR_ACC_INFO_MEM_ERR); + if (is_cfg_err) { + cfg_addr = readl(base + PCIE_OUTB_ERR_ACC_ADDR); + cfg_cause = readl(base + PCIE_OUTB_ERR_CFG_CAUSE); + } + if (is_mem_err) { + mem_cause = readl(base + PCIE_OUTB_ERR_MEM_CAUSE); + lo = readl(base + PCIE_OUTB_ERR_MEM_ADDR_LO); + hi = readl(base + PCIE_OUTB_ERR_MEM_ADDR_HI); + } + /* We've got all of the info, clear the error */ + writel(1, base + PCIE_OUTB_ERR_CLEAR); + 
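As an aid for reading the FIELD_GET() decodes just below, here is one illustrative PCIE_OUTB_ERR_ACC_ADDR value worked through the masks defined above (hypothetical latched value):

	/*
	 * cfg_addr = 0x01312004
	 *
	 *	FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_BUS,  cfg_addr) = 0x13
	 *	FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_DEV,  cfg_addr) = 0x02
	 *	FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_FUNC, cfg_addr) = 0x2
	 *	FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_REG,  cfg_addr) = 0x004
	 *
	 * i.e. a config access to 13:02.2, config offset 0x004.
	 */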
spin_unlock_irqrestore(&pcie->bridge_lock, flags); + + dev_err(pcie->dev, "reporting PCIe info which may be related to %s error\n", + type); + width_str = (info & PCIE_OUTB_ERR_ACC_INFO_TYPE_64) ? "64bit" : "32bit"; + direction_str = str_read_write(!(info & PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE)); + lanes = FIELD_GET(PCIE_OUTB_ERR_ACC_INFO_BYTE_LANES, info); + for (i = 0, lanes_str[8] = 0; i < 8; i++) + lanes_str[i] = (lanes & (1 << i)) ? '1' : '0'; + + if (is_cfg_err) { + int bus = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_BUS, cfg_addr); + int dev = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_DEV, cfg_addr); + int func = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_FUNC, cfg_addr); + int reg = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_REG, cfg_addr); + + dev_err(pcie->dev, "Error: CFG Acc, %s, %s (%04x:%02x:%02x.%d) reg=0x%x, lanes=%s\n", + width_str, direction_str, bridge->domain_nr, bus, dev, + func, reg, lanes_str); + dev_err(pcie->dev, " Type: TO=%d Abt=%d UnsupReq=%d AccTO=%d AccDsbld=%d Acc64bit=%d\n", + !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_TIMEOUT), + !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ABORT), + !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_UNSUPP_REQ), + !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_TIMEOUT), + !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_DISABLED), + !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_64BIT)); + } + + if (is_mem_err) { + u64 addr = ((u64)hi << 32) | (u64)lo; + + dev_err(pcie->dev, "Error: Mem Acc, %s, %s, @0x%llx, lanes=%s\n", + width_str, direction_str, addr, lanes_str); + dev_err(pcie->dev, " Type: TO=%d Abt=%d UnsupReq=%d AccDsble=%d BadAddr=%d\n", + !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_TIMEOUT), + !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_ABORT), + !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_UNSUPP_REQ), + !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_ACC_DISABLED), + !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_BAD_ADDR)); + } + + return NOTIFY_DONE; +} + +static int brcm_pcie_die_notify_cb(struct notifier_block *self, + unsigned long v, void *p) +{ + struct brcm_pcie *pcie = + container_of(self, struct brcm_pcie, die_notifier); + + return brcm_pcie_dump_err(pcie, "Die"); +} + +static int brcm_pcie_panic_notify_cb(struct notifier_block *self, + unsigned long v, void *p) +{ + struct brcm_pcie *pcie = + container_of(self, struct brcm_pcie, panic_notifier); + + return brcm_pcie_dump_err(pcie, "Panic"); +} + +static void brcm_register_die_notifiers(struct brcm_pcie *pcie) +{ + pcie->panic_notifier.notifier_call = brcm_pcie_panic_notify_cb; + atomic_notifier_chain_register(&panic_notifier_list, + &pcie->panic_notifier); + + pcie->die_notifier.notifier_call = brcm_pcie_die_notify_cb; + register_die_notifier(&pcie->die_notifier); +} + +static void brcm_unregister_die_notifiers(struct brcm_pcie *pcie) +{ + unregister_die_notifier(&pcie->die_notifier); + atomic_notifier_chain_unregister(&panic_notifier_list, + &pcie->panic_notifier); +} + static void __brcm_pcie_remove(struct brcm_pcie *pcie) { brcm_msi_remove(pcie); @@ -1725,6 +1896,9 @@ static void brcm_pcie_remove(struct platform_device *pdev) pci_stop_root_bus(bridge->bus); pci_remove_root_bus(bridge->bus); + if (pcie->cfg->has_err_report) + brcm_unregister_die_notifiers(pcie); + __brcm_pcie_remove(pcie); } @@ -1825,6 +1999,7 @@ static const struct pcie_cfg_data bcm7216_cfg = { .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, .has_phy = true, .num_inbound_wins = 3, + .has_err_report = true, }; static const struct pcie_cfg_data bcm7712_cfg = { @@ -1921,7 +2096,10 @@ static int brcm_pcie_probe(struct platform_device *pdev) if (ret) return dev_err_probe(&pdev->dev, 
ret, "could not enable clock\n"); - pcie->cfg->bridge_sw_init_set(pcie, 0); + ret = brcm_pcie_bridge_sw_init_set(pcie, 0); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "could not de-assert bridge reset\n"); if (pcie->swinit_reset) { ret = reset_control_assert(pcie->swinit_reset); @@ -1996,6 +2174,11 @@ static int brcm_pcie_probe(struct platform_device *pdev) return ret; } + if (pcie->cfg->has_err_report) { + spin_lock_init(&pcie->bridge_lock); + brcm_register_die_notifiers(pcie); + } + return 0; fail: diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 22134e95574b..ccf71993ea35 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -17,6 +17,7 @@ #include <linux/irqchip/arm-gic-v3.h> #include <linux/platform_device.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/phy/phy.h> @@ -1337,29 +1338,16 @@ static int iproc_pcie_msi_steer(struct iproc_pcie *pcie, static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) { - struct device_node *msi_node; + struct device_node *msi_node = NULL; int ret; /* * Either the "msi-parent" or the "msi-map" phandle needs to exist * for us to obtain the MSI node. */ - - msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0); - if (!msi_node) { - const __be32 *msi_map = NULL; - int len; - u32 phandle; - - msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len); - if (!msi_map) - return -ENODEV; - - phandle = be32_to_cpup(msi_map + 1); - msi_node = of_find_node_by_phandle(phandle); - if (!msi_node) - return -ENODEV; - } + of_msi_xlate(pcie->dev, &msi_node, 0); + if (!msi_node) + return -ENODEV; /* * Certain revisions of the iProc PCIe controller require additional diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 24cc30a2ab6c..4b78b6528f9f 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -143,23 +143,33 @@ struct mtk_pcie_port; /** + * enum mtk_pcie_quirks - MTK PCIe quirks + * @MTK_PCIE_FIX_CLASS_ID: host's class ID needs to be fixed + * @MTK_PCIE_FIX_DEVICE_ID: host's device ID needs to be fixed + * @MTK_PCIE_NO_MSI: Bridge has no MSI support, and relies on an external block + * @MTK_PCIE_SKIP_RSTB: Skip toggling the RSTB reset bits during probe + */ +enum mtk_pcie_quirks { + MTK_PCIE_FIX_CLASS_ID = BIT(0), + MTK_PCIE_FIX_DEVICE_ID = BIT(1), + MTK_PCIE_NO_MSI = BIT(2), + MTK_PCIE_SKIP_RSTB = BIT(3), +}; + +/** * struct mtk_pcie_soc - differentiate between host generations - * @need_fix_class_id: whether this host's class ID needed to be fixed or not - * @need_fix_device_id: whether this host's device ID needed to be fixed or not - * @no_msi: Bridge has no MSI support, and relies on an external block * @device_id: device ID which this host need to be fixed * @ops: pointer to configuration access functions * @startup: pointer to controller setting functions * @setup_irq: pointer to initialize IRQ functions + * @quirks: PCIe device quirks.
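Before the struct body continues below, a note on the iproc hunk above: the deleted open-coding worked only because of the fixed msi-map entry layout, which is why the phandle was read from msi_map + 1. For reference, with hypothetical values:

	/*
	 * msi-map = <rid-base  phandle  msi-base  length>;
	 * e.g.:
	 * msi-map = <0x0 &gic_its 0x0 0x10000>;
	 *                 ^^^^^^^^ cell 1, hence be32_to_cpup(msi_map + 1)
	 *
	 * of_msi_xlate() resolves both msi-parent and msi-map generically,
	 * so the manual walk is no longer needed.
	 */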
*/ struct mtk_pcie_soc { - bool need_fix_class_id; - bool need_fix_device_id; - bool no_msi; unsigned int device_id; struct pci_ops *ops; int (*startup)(struct mtk_pcie_port *port); int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); + enum mtk_pcie_quirks quirks; }; /** @@ -679,31 +689,28 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val); } - /* Assert all reset signals */ - writel(0, port->base + PCIE_RST_CTRL); + if (!(soc->quirks & MTK_PCIE_SKIP_RSTB)) { + /* Assert all reset signals */ + writel(0, port->base + PCIE_RST_CTRL); - /* - * Enable PCIe link down reset, if link status changed from link up to - * link down, this will reset MAC control registers and configuration - * space. - */ - writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); + /* + * Enable PCIe link down reset: if the link status changes + * from link up to link down, this resets the MAC control + * registers and configuration space. + */ + writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); - /* - * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and - * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should - * be delayed 100ms (TPVPERL) for the power and clock to become stable. - */ - msleep(100); + msleep(PCIE_T_PVPERL_MS); - /* De-assert PHY, PE, PIPE, MAC and configuration reset */ - val = readl(port->base + PCIE_RST_CTRL); - val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | - PCIE_MAC_SRSTB | PCIE_CRSTB; - writel(val, port->base + PCIE_RST_CTRL); + /* De-assert PHY, PE, PIPE, MAC and configuration reset */ + val = readl(port->base + PCIE_RST_CTRL); + val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | + PCIE_MAC_SRSTB | PCIE_CRSTB; + writel(val, port->base + PCIE_RST_CTRL); + } /* Set up vendor ID and class code */ - if (soc->need_fix_class_id) { + if (soc->quirks & MTK_PCIE_FIX_CLASS_ID) { val = PCI_VENDOR_ID_MEDIATEK; writew(val, port->base + PCIE_CONF_VEND_ID); @@ -711,7 +718,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) writew(val, port->base + PCIE_CONF_CLASS_ID); } - if (soc->need_fix_device_id) + if (soc->quirks & MTK_PCIE_FIX_DEVICE_ID) writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID); /* 100ms timeout value should be enough for Gen1/2 training */ @@ -821,6 +828,41 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port) return 0; } +static int mtk_pcie_startup_port_an7583(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct device *dev = pcie->dev; + struct pci_host_bridge *host; + struct resource_entry *entry; + struct regmap *pbus_regmap; + resource_size_t addr; + u32 args[2], size; + + /* + * Configure the PBus base address and base address mask so the + * hardware can detect whether a given address is accessible + * through the PCIe controller.
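The mask computed at the end of this new startup path deserves a worked example: GENMASK(31, __fls(size)) keeps only the address bits above the window size, giving the base/mask pair the fabric compares against. Assuming a hypothetical 512 MB window:

	/*
	 * size = 0x20000000 (512 MB)  =>  __fls(size) = 29
	 * GENMASK(31, 29)             =   0xe0000000
	 *
	 * An address A is claimed by the PCIe controller iff
	 *	(A & 0xe0000000) == (base & 0xe0000000)
	 */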
+ */ + pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node, + "mediatek,pbus-csr", + ARRAY_SIZE(args), + args); + if (IS_ERR(pbus_regmap)) + return PTR_ERR(pbus_regmap); + + host = pci_host_bridge_from_priv(pcie); + entry = resource_list_first_type(&host->windows, IORESOURCE_MEM); + if (!entry) + return -ENODEV; + + addr = entry->res->start - entry->offset; + regmap_write(pbus_regmap, args[0], lower_32_bits(addr)); + size = lower_32_bits(resource_size(entry->res)); + regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size))); + + return mtk_pcie_startup_port_v2(port); +} + static void mtk_pcie_enable_port(struct mtk_pcie_port *port) { struct mtk_pcie *pcie = port->pcie; @@ -1099,7 +1141,7 @@ static int mtk_pcie_probe(struct platform_device *pdev) host->ops = pcie->soc->ops; host->sysdata = pcie; - host->msi_domain = pcie->soc->no_msi; + host->msi_domain = !!(pcie->soc->quirks & MTK_PCIE_NO_MSI); err = pci_host_probe(host); if (err) @@ -1187,9 +1229,9 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = { }; static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { - .no_msi = true, .ops = &mtk_pcie_ops, .startup = mtk_pcie_startup_port, + .quirks = MTK_PCIE_NO_MSI, }; static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = { @@ -1199,22 +1241,29 @@ static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = { }; static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = { - .need_fix_class_id = true, .ops = &mtk_pcie_ops_v2, .startup = mtk_pcie_startup_port_v2, .setup_irq = mtk_pcie_setup_irq, + .quirks = MTK_PCIE_FIX_CLASS_ID, +}; + +static const struct mtk_pcie_soc mtk_pcie_soc_an7583 = { + .ops = &mtk_pcie_ops_v2, + .startup = mtk_pcie_startup_port_an7583, + .setup_irq = mtk_pcie_setup_irq, + .quirks = MTK_PCIE_FIX_CLASS_ID | MTK_PCIE_SKIP_RSTB, }; static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = { - .need_fix_class_id = true, - .need_fix_device_id = true, .device_id = PCI_DEVICE_ID_MEDIATEK_7629, .ops = &mtk_pcie_ops_v2, .startup = mtk_pcie_startup_port_v2, .setup_irq = mtk_pcie_setup_irq, + .quirks = MTK_PCIE_FIX_CLASS_ID | MTK_PCIE_FIX_DEVICE_ID, }; static const struct of_device_id mtk_pcie_ids[] = { + { .compatible = "airoha,an7583-pcie", .data = &mtk_pcie_soc_an7583 }, { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 }, diff --git a/drivers/pci/controller/pcie-rzg3s-host.c b/drivers/pci/controller/pcie-rzg3s-host.c new file mode 100644 index 000000000000..667e6d629474 --- /dev/null +++ b/drivers/pci/controller/pcie-rzg3s-host.c @@ -0,0 +1,1761 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe driver for Renesas RZ/G3S SoCs + * + * Copyright (C) 2025 Renesas Electronics Corp. 
+ * + * Based on: + * drivers/pci/controller/pcie-rcar-host.c + * Copyright (C) 2009 - 2011 Paul Mundt + */ + +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/cleanup.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/mutex.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/units.h> + +#include "../pci.h" + +/* AXI registers */ +#define RZG3S_PCI_REQDATA(id) (0x80 + (id) * 0x4) +#define RZG3S_PCI_REQRCVDAT 0x8c + +#define RZG3S_PCI_REQADR1 0x90 +#define RZG3S_PCI_REQADR1_BUS GENMASK(31, 24) +#define RZG3S_PCI_REQADR1_DEV GENMASK(23, 19) +#define RZG3S_PCI_REQADR1_FUNC GENMASK(18, 16) +#define RZG3S_PCI_REQADR1_REG GENMASK(11, 0) + +#define RZG3S_PCI_REQBE 0x98 +#define RZG3S_PCI_REQBE_BYTE_EN GENMASK(3, 0) + +#define RZG3S_PCI_REQISS 0x9c +#define RZG3S_PCI_REQISS_MOR_STATUS GENMASK(18, 16) +#define RZG3S_PCI_REQISS_TR_TYPE GENMASK(11, 8) +#define RZG3S_PCI_REQISS_TR_TP0_RD FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x4) +#define RZG3S_PCI_REQISS_TR_TP0_WR FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x5) +#define RZG3S_PCI_REQISS_TR_TP1_RD FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x6) +#define RZG3S_PCI_REQISS_TR_TP1_WR FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x7) +#define RZG3S_PCI_REQISS_REQ_ISSUE BIT(0) + +#define RZG3S_PCI_MSIRCVWADRL 0x100 +#define RZG3S_PCI_MSIRCVWADRL_MASK GENMASK(31, 3) +#define RZG3S_PCI_MSIRCVWADRL_MSG_DATA_ENA BIT(1) +#define RZG3S_PCI_MSIRCVWADRL_ENA BIT(0) + +#define RZG3S_PCI_MSIRCVWADRU 0x104 + +#define RZG3S_PCI_MSIRCVWMSKL 0x108 +#define RZG3S_PCI_MSIRCVWMSKL_MASK GENMASK(31, 2) + +#define RZG3S_PCI_PINTRCVIE 0x110 +#define RZG3S_PCI_PINTRCVIE_INTX(i) BIT(i) +#define RZG3S_PCI_PINTRCVIE_MSI BIT(4) + +#define RZG3S_PCI_PINTRCVIS 0x114 +#define RZG3S_PCI_PINTRCVIS_INTX(i) BIT(i) +#define RZG3S_PCI_PINTRCVIS_MSI BIT(4) + +#define RZG3S_PCI_MSGRCVIE 0x120 +#define RZG3S_PCI_MSGRCVIE_MSG_RCV BIT(24) + +#define RZG3S_PCI_MSGRCVIS 0x124 +#define RZG3S_PCI_MSGRCVIS_MRI BIT(24) + +#define RZG3S_PCI_PEIE0 0x200 + +#define RZG3S_PCI_PEIS0 0x204 +#define RZG3S_PCI_PEIS0_RX_DLLP_PM_ENTER BIT(12) +#define RZG3S_PCI_PEIS0_DL_UPDOWN BIT(9) + +#define RZG3S_PCI_PEIE1 0x208 +#define RZG3S_PCI_PEIS1 0x20c +#define RZG3S_PCI_AMEIS 0x214 +#define RZG3S_PCI_ASEIS1 0x224 + +#define RZG3S_PCI_PCSTAT1 0x408 +#define RZG3S_PCI_PCSTAT1_LTSSM_STATE GENMASK(14, 10) +#define RZG3S_PCI_PCSTAT1_DL_DOWN_STS BIT(0) + +#define RZG3S_PCI_PCCTRL2 0x410 +#define RZG3S_PCI_PCCTRL2_LS_CHG GENMASK(9, 8) +#define RZG3S_PCI_PCCTRL2_LS_CHG_REQ BIT(0) + +#define RZG3S_PCI_PCSTAT2 0x414 +#define RZG3S_PCI_PCSTAT2_LS_CHG_DONE BIT(28) +#define RZG3S_PCI_PCSTAT2_SDRIRE GENMASK(7, 1) + +#define RZG3S_PCI_PERM 0x300 +#define RZG3S_PCI_PERM_CFG_HWINIT_EN BIT(2) +#define RZG3S_PCI_PERM_PIPE_PHY_REG_EN BIT(1) + +#define RZG3S_PCI_MSIRE(id) (0x600 + (id) * 0x10) +#define RZG3S_PCI_MSIRE_ENA BIT(0) + +#define RZG3S_PCI_MSIRM(id) (0x608 + (id) * 0x10) +#define RZG3S_PCI_MSIRS(id) (0x60c + (id) * 0x10) + +#define RZG3S_PCI_AWBASEL(id) (0x1000 + (id) * 0x20) +#define RZG3S_PCI_AWBASEL_WIN_ENA BIT(0) + +#define 
RZG3S_PCI_AWBASEU(id) (0x1004 + (id) * 0x20) +#define RZG3S_PCI_AWMASKL(id) (0x1008 + (id) * 0x20) +#define RZG3S_PCI_AWMASKU(id) (0x100c + (id) * 0x20) +#define RZG3S_PCI_ADESTL(id) (0x1010 + (id) * 0x20) +#define RZG3S_PCI_ADESTU(id) (0x1014 + (id) * 0x20) + +#define RZG3S_PCI_PWBASEL(id) (0x1100 + (id) * 0x20) +#define RZG3S_PCI_PWBASEL_ENA BIT(0) + +#define RZG3S_PCI_PWBASEU(id) (0x1104 + (id) * 0x20) +#define RZG3S_PCI_PDESTL(id) (0x1110 + (id) * 0x20) +#define RZG3S_PCI_PDESTU(id) (0x1114 + (id) * 0x20) +#define RZG3S_PCI_PWMASKL(id) (0x1108 + (id) * 0x20) +#define RZG3S_PCI_PWMASKU(id) (0x110c + (id) * 0x20) + +/* PHY control registers */ +#define RZG3S_PCI_PHY_XCFGD(id) (0x2000 + (id) * 0x10) +#define RZG3S_PCI_PHY_XCFGD_NUM 39 + +#define RZG3S_PCI_PHY_XCFGA_CMN(id) (0x2400 + (id) * 0x10) +#define RZG3S_PCI_PHY_XCFGA_CMN_NUM 16 + +#define RZG3S_PCI_PHY_XCFGA_RX(id) (0x2500 + (id) * 0x10) +#define RZG3S_PCI_PHY_XCFGA_RX_NUM 13 + +#define RZG3S_PCI_PHY_XCFGA_TX 0x25d0 + +#define RZG3S_PCI_PHY_XCFG_CTRL 0x2a20 +#define RZG3S_PCI_PHY_XCFG_CTRL_PHYREG_SEL BIT(0) + +/* PCIe registers */ +#define RZG3S_PCI_CFG_BASE 0x6000 +#define RZG3S_PCI_CFG_BARMSK00L 0xa0 +#define RZG3S_PCI_CFG_BARMSK00U 0xa4 + +#define RZG3S_PCI_CFG_PCIEC 0x60 + +/* System controller registers */ +#define RZG3S_SYS_PCIE_RST_RSM_B 0xd74 +#define RZG3S_SYS_PCIE_RST_RSM_B_MASK BIT(0) + +/* Maximum number of windows */ +#define RZG3S_MAX_WINDOWS 8 + +/* Number of MSI interrupts per register */ +#define RZG3S_PCI_MSI_INT_PER_REG 32 +/* The number of MSI interrupts */ +#define RZG3S_PCI_MSI_INT_NR RZG3S_PCI_MSI_INT_PER_REG + +/* Timeouts experimentally determined */ +#define RZG3S_REQ_ISSUE_TIMEOUT_US 2500 + +/** + * struct rzg3s_pcie_msi - RZ/G3S PCIe MSI data structure + * @domain: IRQ domain + * @map: bitmap with the allocated MSIs + * @dma_addr: address of the allocated MSI window + * @window_base: base address of the MSI window + * @pages: allocated pages for MSI window mapping + * @map_lock: lock for bitmap with the allocated MSIs + * @irq: MSI interrupt + */ +struct rzg3s_pcie_msi { + struct irq_domain *domain; + DECLARE_BITMAP(map, RZG3S_PCI_MSI_INT_NR); + dma_addr_t dma_addr; + dma_addr_t window_base; + unsigned long pages; + struct mutex map_lock; + int irq; +}; + +struct rzg3s_pcie_host; + +/** + * struct rzg3s_pcie_soc_data - SoC specific data + * @init_phy: PHY initialization function + * @power_resets: array with the resets that need to be de-asserted after + * power-on + * @cfg_resets: array with the resets that need to be de-asserted after + * configuration + * @num_power_resets: number of power resets + * @num_cfg_resets: number of configuration resets + */ +struct rzg3s_pcie_soc_data { + int (*init_phy)(struct rzg3s_pcie_host *host); + const char * const *power_resets; + const char * const *cfg_resets; + u8 num_power_resets; + u8 num_cfg_resets; +}; + +/** + * struct rzg3s_pcie_port - RZ/G3S PCIe Root Port data structure + * @refclk: PCIe reference clock + * @vendor_id: Vendor ID + * @device_id: Device ID + */ +struct rzg3s_pcie_port { + struct clk *refclk; + u32 vendor_id; + u32 device_id; +}; + +/** + * struct rzg3s_pcie_host - RZ/G3S PCIe data structure + * @axi: base address for AXI registers + * @pcie: base address for PCIe registers + * @dev: struct device + * @power_resets: reset control signals that should be set after power up + * @cfg_resets: reset control signals that should be set after configuration + * @sysc: SYSC regmap + * @intx_domain: INTx IRQ domain + * @data: SoC specific data + * 
@msi: MSI data structure + * @port: PCIe Root Port + * @hw_lock: lock for access to the HW resources + * @intx_irqs: INTx interrupts + * @max_link_speed: maximum supported link speed + */ +struct rzg3s_pcie_host { + void __iomem *axi; + void __iomem *pcie; + struct device *dev; + struct reset_control_bulk_data *power_resets; + struct reset_control_bulk_data *cfg_resets; + struct regmap *sysc; + struct irq_domain *intx_domain; + const struct rzg3s_pcie_soc_data *data; + struct rzg3s_pcie_msi msi; + struct rzg3s_pcie_port port; + raw_spinlock_t hw_lock; + int intx_irqs[PCI_NUM_INTX]; + int max_link_speed; +}; + +#define rzg3s_msi_to_host(_msi) container_of(_msi, struct rzg3s_pcie_host, msi) + +static void rzg3s_pcie_update_bits(void __iomem *base, u32 offset, u32 mask, + u32 val) +{ + u32 tmp; + + tmp = readl_relaxed(base + offset); + tmp &= ~mask; + tmp |= val & mask; + writel_relaxed(tmp, base + offset); +} + +static int rzg3s_pcie_child_issue_request(struct rzg3s_pcie_host *host) +{ + u32 val; + int ret; + + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_REQISS, + RZG3S_PCI_REQISS_REQ_ISSUE, + RZG3S_PCI_REQISS_REQ_ISSUE); + ret = readl_poll_timeout_atomic(host->axi + RZG3S_PCI_REQISS, val, + !(val & RZG3S_PCI_REQISS_REQ_ISSUE), + 5, RZG3S_REQ_ISSUE_TIMEOUT_US); + + if (val & RZG3S_PCI_REQISS_MOR_STATUS) + return -EIO; + + return ret; +} + +static void rzg3s_pcie_child_prepare_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct rzg3s_pcie_host *host = bus->sysdata; + unsigned int dev, func, reg; + + dev = PCI_SLOT(devfn); + func = PCI_FUNC(devfn); + reg = where & ~0x3; + + /* Set the destination */ + writel_relaxed(FIELD_PREP(RZG3S_PCI_REQADR1_BUS, bus->number) | + FIELD_PREP(RZG3S_PCI_REQADR1_DEV, dev) | + FIELD_PREP(RZG3S_PCI_REQADR1_FUNC, func) | + FIELD_PREP(RZG3S_PCI_REQADR1_REG, reg), + host->axi + RZG3S_PCI_REQADR1); + + /* Set byte enable */ + writel_relaxed(RZG3S_PCI_REQBE_BYTE_EN, host->axi + RZG3S_PCI_REQBE); +} + +static int rzg3s_pcie_child_read_conf(struct rzg3s_pcie_host *host, + struct pci_bus *bus, unsigned int devfn, + int where, u32 *data) +{ + bool type0 = pci_is_root_bus(bus->parent) ? true : false; + int ret; + + rzg3s_pcie_child_prepare_bus(bus, devfn, where); + + /* Set the type of request */ + writel_relaxed(type0 ? RZG3S_PCI_REQISS_TR_TP0_RD : + RZG3S_PCI_REQISS_TR_TP1_RD, + host->axi + RZG3S_PCI_REQISS); + + /* Issue the request and wait to finish */ + ret = rzg3s_pcie_child_issue_request(host); + if (ret) + return PCIBIOS_SET_FAILED; + + /* Read the data */ + *data = readl_relaxed(host->axi + RZG3S_PCI_REQRCVDAT); + + return PCIBIOS_SUCCESSFUL; +} + +/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ +static int rzg3s_pcie_child_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct rzg3s_pcie_host *host = bus->sysdata; + int ret; + + ret = rzg3s_pcie_child_read_conf(host, bus, devfn, where, val); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} + +static int rzg3s_pcie_child_write_conf(struct rzg3s_pcie_host *host, + struct pci_bus *bus, unsigned int devfn, + int where, u32 data) +{ + bool type0 = pci_is_root_bus(bus->parent) ? 
true : false; + int ret; + + rzg3s_pcie_child_prepare_bus(bus, devfn, where); + + /* Set the write data */ + writel_relaxed(0, host->axi + RZG3S_PCI_REQDATA(0)); + writel_relaxed(0, host->axi + RZG3S_PCI_REQDATA(1)); + writel_relaxed(data, host->axi + RZG3S_PCI_REQDATA(2)); + + /* Set the type of request */ + writel_relaxed(type0 ? RZG3S_PCI_REQISS_TR_TP0_WR : + RZG3S_PCI_REQISS_TR_TP1_WR, + host->axi + RZG3S_PCI_REQISS); + + /* Issue the request and wait to finish */ + ret = rzg3s_pcie_child_issue_request(host); + if (ret) + return PCIBIOS_SET_FAILED; + + return PCIBIOS_SUCCESSFUL; +} + +/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ +static int rzg3s_pcie_child_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct rzg3s_pcie_host *host = bus->sysdata; + u32 data, shift; + int ret; + + if (size == 4) + return rzg3s_pcie_child_write_conf(host, bus, devfn, where, val); + + /* + * The controller does 32-bit accesses only. For smaller accesses, + * software needs to do read/modify/write, which may have side effects. + * For example, software may perform a 16-bit write. Since the hardware + * only supports 32-bit accesses, we must do a 32-bit read, merge in the + * 16 bits we intend to write, followed by a 32-bit write. If the 16 + * bits we *don't* intend to write happen to have any RW1C + * (write-one-to-clear) bits set, we just inadvertently cleared + * something we shouldn't have. + */ + if (!bus->unsafe_warn) { + dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n", + size, pci_domain_nr(bus), bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn), where); + bus->unsafe_warn = 1; + } + + ret = rzg3s_pcie_child_read_conf(host, bus, devfn, where, &data); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + if (size == 1) { + shift = BITS_PER_BYTE * (where & 3); + data &= ~(0xff << shift); + data |= ((val & 0xff) << shift); + } else if (size == 2) { + shift = BITS_PER_BYTE * (where & 2); + data &= ~(0xffff << shift); + data |= ((val & 0xffff) << shift); + } else { + data = val; + } + + return rzg3s_pcie_child_write_conf(host, bus, devfn, where, data); +} + +static struct pci_ops rzg3s_pcie_child_ops = { + .read = rzg3s_pcie_child_read, + .write = rzg3s_pcie_child_write, +}; + +static void __iomem *rzg3s_pcie_root_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct rzg3s_pcie_host *host = bus->sysdata; + + if (devfn) + return NULL; + + return host->pcie + where; +} + +/* Serialized by 'pci_lock' */ +static int rzg3s_pcie_root_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct rzg3s_pcie_host *host = bus->sysdata; + int ret; + + /* Enable access control to the CFGU */ + writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN, + host->axi + RZG3S_PCI_PERM); + + ret = pci_generic_config_write(bus, devfn, where, size, val); + + /* Disable access control to the CFGU */ + writel_relaxed(0, host->axi + RZG3S_PCI_PERM); + + return ret; +} + +static struct pci_ops rzg3s_pcie_root_ops = { + .read = pci_generic_config_read, + .write = rzg3s_pcie_root_write, + .map_bus = rzg3s_pcie_root_map_bus, +}; + +static void rzg3s_pcie_intx_irq_handler(struct irq_desc *desc) +{ + struct rzg3s_pcie_host *host = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int irq = irq_desc_get_irq(desc); + u32 intx = irq - host->intx_irqs[0]; + + chained_irq_enter(chip, desc); + generic_handle_domain_irq(host->intx_domain, intx); +
chained_irq_exit(chip, desc); +} + +static irqreturn_t rzg3s_pcie_msi_irq(int irq, void *data) +{ + u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG; + DECLARE_BITMAP(bitmap, RZG3S_PCI_MSI_INT_NR); + struct rzg3s_pcie_host *host = data; + struct rzg3s_pcie_msi *msi = &host->msi; + unsigned long bit; + u32 status; + + status = readl_relaxed(host->axi + RZG3S_PCI_PINTRCVIS); + if (!(status & RZG3S_PCI_PINTRCVIS_MSI)) + return IRQ_NONE; + + /* Clear the MSI */ + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIS, + RZG3S_PCI_PINTRCVIS_MSI, + RZG3S_PCI_PINTRCVIS_MSI); + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSGRCVIS, + RZG3S_PCI_MSGRCVIS_MRI, RZG3S_PCI_MSGRCVIS_MRI); + + for (u8 reg_id = 0; reg_id < regs; reg_id++) { + status = readl_relaxed(host->axi + RZG3S_PCI_MSIRS(reg_id)); + bitmap_write(bitmap, status, reg_id * RZG3S_PCI_MSI_INT_PER_REG, + RZG3S_PCI_MSI_INT_PER_REG); + } + + for_each_set_bit(bit, bitmap, RZG3S_PCI_MSI_INT_NR) { + int ret; + + ret = generic_handle_domain_irq(msi->domain, bit); + if (ret) { + u8 reg_bit = bit % RZG3S_PCI_MSI_INT_PER_REG; + u8 reg_id = bit / RZG3S_PCI_MSI_INT_PER_REG; + + /* Unknown MSI, just clear it */ + writel_relaxed(BIT(reg_bit), + host->axi + RZG3S_PCI_MSIRS(reg_id)); + } + } + + return IRQ_HANDLED; +} + +static void rzg3s_pcie_msi_irq_ack(struct irq_data *d) +{ + struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d); + struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi); + u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG; + u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG; + + guard(raw_spinlock_irqsave)(&host->hw_lock); + + writel_relaxed(BIT(reg_bit), host->axi + RZG3S_PCI_MSIRS(reg_id)); +} + +static void rzg3s_pcie_msi_irq_mask(struct irq_data *d) +{ + struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d); + struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi); + u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG; + u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG; + + guard(raw_spinlock_irqsave)(&host->hw_lock); + + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSIRM(reg_id), BIT(reg_bit), + BIT(reg_bit)); +} + +static void rzg3s_pcie_msi_irq_unmask(struct irq_data *d) +{ + struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d); + struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi); + u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG; + u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG; + + guard(raw_spinlock_irqsave)(&host->hw_lock); + + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSIRM(reg_id), BIT(reg_bit), + 0); +} + +static void rzg3s_pcie_irq_compose_msi_msg(struct irq_data *data, + struct msi_msg *msg) +{ + struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(data); + struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi); + u32 lo, hi; + + /* + * Enable and msg data enable bits are part of the address lo. Drop + * them along with the unused bit. 
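+ * + * Illustrative example (window address assumed, not from the patch): with + * the MSI window programmed at 0x58000000, hwirq 5 is composed as + * address_lo = 0x58000000, address_hi = 0, data = 5.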
+ */ + lo = readl_relaxed(host->axi + RZG3S_PCI_MSIRCVWADRL) & + RZG3S_PCI_MSIRCVWADRL_MASK; + hi = readl_relaxed(host->axi + RZG3S_PCI_MSIRCVWADRU); + + msg->address_lo = lo; + msg->address_hi = hi; + msg->data = data->hwirq; +} + +static struct irq_chip rzg3s_pcie_msi_bottom_chip = { + .name = "rzg3s-pcie-msi", + .irq_ack = rzg3s_pcie_msi_irq_ack, + .irq_mask = rzg3s_pcie_msi_irq_mask, + .irq_unmask = rzg3s_pcie_msi_irq_unmask, + .irq_compose_msi_msg = rzg3s_pcie_irq_compose_msi_msg, +}; + +static int rzg3s_pcie_msi_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct rzg3s_pcie_msi *msi = domain->host_data; + int hwirq; + + scoped_guard(mutex, &msi->map_lock) { + hwirq = bitmap_find_free_region(msi->map, RZG3S_PCI_MSI_INT_NR, + order_base_2(nr_irqs)); + } + + if (hwirq < 0) + return -ENOSPC; + + for (unsigned int i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, + &rzg3s_pcie_msi_bottom_chip, + domain->host_data, handle_edge_irq, NULL, + NULL); + } + + return 0; +} + +static void rzg3s_pcie_msi_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct rzg3s_pcie_msi *msi = domain->host_data; + + guard(mutex)(&msi->map_lock); + + bitmap_release_region(msi->map, d->hwirq, order_base_2(nr_irqs)); +} + +static const struct irq_domain_ops rzg3s_pcie_msi_domain_ops = { + .alloc = rzg3s_pcie_msi_domain_alloc, + .free = rzg3s_pcie_msi_domain_free, +}; + +#define RZG3S_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define RZG3S_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \ + MSI_GENERIC_FLAGS_MASK) + +static const struct msi_parent_ops rzg3s_pcie_msi_parent_ops = { + .required_flags = RZG3S_PCIE_MSI_FLAGS_REQUIRED, + .supported_flags = RZG3S_PCIE_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "RZG3S-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + +static int rzg3s_pcie_msi_allocate_domains(struct rzg3s_pcie_msi *msi) +{ + struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi); + struct device *dev = host->dev; + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &rzg3s_pcie_msi_domain_ops, + .size = RZG3S_PCI_MSI_INT_NR, + .host_data = msi, + }; + + msi->domain = msi_create_parent_irq_domain(&info, + &rzg3s_pcie_msi_parent_ops); + if (!msi->domain) + return dev_err_probe(dev, -ENOMEM, + "failed to create IRQ domain\n"); + + return 0; +} + +static int rzg3s_pcie_msi_hw_setup(struct rzg3s_pcie_host *host) +{ + u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG; + struct rzg3s_pcie_msi *msi = &host->msi; + + /* + * Set MSI window size. HW will set the window to + * RZG3S_PCI_MSI_INT_NR * 4 bytes. 
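+ * + * With RZG3S_PCI_MSI_INT_NR = 32 this is a 128-byte window (32 vectors + * of 4 bytes): the FIELD_PREP(RZG3S_PCI_MSIRCVWMSKL_MASK, + * RZG3S_PCI_MSI_INT_NR - 1) write below sets mask bits [6:2] + * (31 << 2 = 0x7c).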
+ */ + writel_relaxed(FIELD_PREP(RZG3S_PCI_MSIRCVWMSKL_MASK, + RZG3S_PCI_MSI_INT_NR - 1), + host->axi + RZG3S_PCI_MSIRCVWMSKL); + + /* Set MSI window address and enable MSI window */ + writel_relaxed(upper_32_bits(msi->window_base), + host->axi + RZG3S_PCI_MSIRCVWADRU); + writel_relaxed(lower_32_bits(msi->window_base) | + RZG3S_PCI_MSIRCVWADRL_ENA | + RZG3S_PCI_MSIRCVWADRL_MSG_DATA_ENA, + host->axi + RZG3S_PCI_MSIRCVWADRL); + + /* Set MSI receive enable */ + for (u8 reg_id = 0; reg_id < regs; reg_id++) { + writel_relaxed(RZG3S_PCI_MSIRE_ENA, + host->axi + RZG3S_PCI_MSIRE(reg_id)); + } + + /* Enable message receive interrupts */ + writel_relaxed(RZG3S_PCI_MSGRCVIE_MSG_RCV, + host->axi + RZG3S_PCI_MSGRCVIE); + + /* Enable MSI */ + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE, + RZG3S_PCI_PINTRCVIE_MSI, + RZG3S_PCI_PINTRCVIE_MSI); + + return 0; +} + +static int rzg3s_pcie_msi_setup(struct rzg3s_pcie_host *host) +{ + size_t size = RZG3S_PCI_MSI_INT_NR * sizeof(u32); + struct rzg3s_pcie_msi *msi = &host->msi; + struct device *dev = host->dev; + int id, ret; + + msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA, 0); + if (!msi->pages) + return -ENOMEM; + + msi->dma_addr = dma_map_single(dev, (void *)msi->pages, size * 2, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, msi->dma_addr)) { + ret = -ENOMEM; + goto free_pages; + } + + /* + * According to the RZ/G3S HW manual (Rev.1.10, section 34.4.5.2 Setting + * the MSI Window) the MSI window needs to fall within one of the + * enabled AXI windows. Find an enabled AXI window to setup the MSI + * window. + */ + for (id = 0; id < RZG3S_MAX_WINDOWS; id++) { + u64 base, basel, baseu; + u64 mask, maskl, masku; + + basel = readl_relaxed(host->axi + RZG3S_PCI_AWBASEL(id)); + /* Skip checking this AXI window if it's not enabled */ + if (!(basel & RZG3S_PCI_AWBASEL_WIN_ENA)) + continue; + + baseu = readl_relaxed(host->axi + RZG3S_PCI_AWBASEU(id)); + base = baseu << 32 | basel; + + maskl = readl_relaxed(host->axi + RZG3S_PCI_AWMASKL(id)); + masku = readl_relaxed(host->axi + RZG3S_PCI_AWMASKU(id)); + mask = masku << 32 | maskl; + + if (msi->dma_addr < base || msi->dma_addr > base + mask) + continue; + + break; + } + + if (id == RZG3S_MAX_WINDOWS) { + ret = -EINVAL; + goto dma_unmap; + } + + /* The MSI base address must be aligned to the MSI size */ + msi->window_base = ALIGN(msi->dma_addr, size); + if (msi->window_base < msi->dma_addr) { + ret = -EINVAL; + goto dma_unmap; + } + + rzg3s_pcie_msi_hw_setup(host); + + return 0; + +dma_unmap: + dma_unmap_single(dev, msi->dma_addr, size * 2, DMA_BIDIRECTIONAL); +free_pages: + free_pages(msi->pages, 0); + return ret; +} + +static void rzg3s_pcie_msi_hw_teardown(struct rzg3s_pcie_host *host) +{ + u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG; + + /* Disable MSI */ + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE, + RZG3S_PCI_PINTRCVIE_MSI, 0); + + /* Disable message receive interrupts */ + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSGRCVIE, + RZG3S_PCI_MSGRCVIE_MSG_RCV, 0); + + /* Disable MSI receive enable */ + for (u8 reg_id = 0; reg_id < regs; reg_id++) + writel_relaxed(0, host->axi + RZG3S_PCI_MSIRE(reg_id)); + + /* Disable MSI window */ + writel_relaxed(0, host->axi + RZG3S_PCI_MSIRCVWADRL); +} + +static void rzg3s_pcie_teardown_msi(struct rzg3s_pcie_host *host) +{ + size_t size = RZG3S_PCI_MSI_INT_NR * sizeof(u32); + struct rzg3s_pcie_msi *msi = &host->msi; + + rzg3s_pcie_msi_hw_teardown(host); + + free_irq(msi->irq, host); + irq_domain_remove(msi->domain); + + /* Free unused 
memory */ + dma_unmap_single(host->dev, msi->dma_addr, size * 2, DMA_BIDIRECTIONAL); + free_pages(msi->pages, 0); +} + +static int rzg3s_pcie_init_msi(struct rzg3s_pcie_host *host) +{ + struct platform_device *pdev = to_platform_device(host->dev); + struct rzg3s_pcie_msi *msi = &host->msi; + struct device *dev = host->dev; + const char *devname; + int ret; + + ret = devm_mutex_init(dev, &msi->map_lock); + if (ret) + return ret; + + msi->irq = platform_get_irq_byname(pdev, "msi"); + if (msi->irq < 0) + return dev_err_probe(dev, msi->irq, "Failed to get MSI IRQ!\n"); + + devname = devm_kasprintf(dev, GFP_KERNEL, "%s-msi", dev_name(dev)); + if (!devname) + return -ENOMEM; + + ret = rzg3s_pcie_msi_allocate_domains(msi); + if (ret) + return ret; + + /* + * Don't use devm_request_irq() as the driver uses non-devm helpers + * to control clocks. Mixing them may lead to subtle bugs. + */ + ret = request_irq(msi->irq, rzg3s_pcie_msi_irq, 0, devname, host); + if (ret) { + dev_err_probe(dev, ret, "Failed to request IRQ: %d\n", ret); + goto free_domains; + } + + ret = rzg3s_pcie_msi_setup(host); + if (ret) { + dev_err_probe(dev, ret, "Failed to setup MSI!\n"); + goto free_irq; + } + + return 0; + +free_irq: + free_irq(msi->irq, host); +free_domains: + irq_domain_remove(msi->domain); + return ret; +} + +static void rzg3s_pcie_intx_irq_ack(struct irq_data *d) +{ + struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d); + + guard(raw_spinlock_irqsave)(&host->hw_lock); + + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIS, + RZG3S_PCI_PINTRCVIS_INTX(d->hwirq), + RZG3S_PCI_PINTRCVIS_INTX(d->hwirq)); +} + +static void rzg3s_pcie_intx_irq_mask(struct irq_data *d) +{ + struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d); + + guard(raw_spinlock_irqsave)(&host->hw_lock); + + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE, + RZG3S_PCI_PINTRCVIE_INTX(d->hwirq), 0); +} + +static void rzg3s_pcie_intx_irq_unmask(struct irq_data *d) +{ + struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d); + + guard(raw_spinlock_irqsave)(&host->hw_lock); + + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE, + RZG3S_PCI_PINTRCVIE_INTX(d->hwirq), + RZG3S_PCI_PINTRCVIE_INTX(d->hwirq)); +} + +static struct irq_chip rzg3s_pcie_intx_irq_chip = { + .name = "PCIe INTx", + .irq_ack = rzg3s_pcie_intx_irq_ack, + .irq_mask = rzg3s_pcie_intx_irq_mask, + .irq_unmask = rzg3s_pcie_intx_irq_unmask, +}; + +static int rzg3s_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &rzg3s_pcie_intx_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops rzg3s_pcie_intx_domain_ops = { + .map = rzg3s_pcie_intx_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +static int rzg3s_pcie_init_irqdomain(struct rzg3s_pcie_host *host) +{ + struct device *dev = host->dev; + struct platform_device *pdev = to_platform_device(dev); + + for (int i = 0; i < PCI_NUM_INTX; i++) { + char irq_name[5] = {0}; + int irq; + + scnprintf(irq_name, ARRAY_SIZE(irq_name), "int%c", 'a' + i); + + irq = platform_get_irq_byname(pdev, irq_name); + if (irq < 0) + return dev_err_probe(dev, -EINVAL, + "Failed to parse and map INT%c IRQ\n", + 'A' + i); + + host->intx_irqs[i] = irq; + irq_set_chained_handler_and_data(irq, + rzg3s_pcie_intx_irq_handler, + host); + } + + host->intx_domain = irq_domain_create_linear(dev_fwnode(dev), + PCI_NUM_INTX, + &rzg3s_pcie_intx_domain_ops, + host); + if (!host->intx_domain) + return 
dev_err_probe(dev, -EINVAL, + "Failed to add irq domain for INTx IRQs\n"); + irq_domain_update_bus_token(host->intx_domain, DOMAIN_BUS_WIRED); + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + int ret = rzg3s_pcie_init_msi(host); + + if (ret) { + irq_domain_remove(host->intx_domain); + return ret; + } + } + + return 0; +} + +static void rzg3s_pcie_teardown_irqdomain(struct rzg3s_pcie_host *host) +{ + if (IS_ENABLED(CONFIG_PCI_MSI)) + rzg3s_pcie_teardown_msi(host); + + irq_domain_remove(host->intx_domain); +} + +static int rzg3s_pcie_set_max_link_speed(struct rzg3s_pcie_host *host) +{ + u32 remote_supported_link_speeds, max_supported_link_speeds; + u32 cs2, tmp, pcie_cap = RZG3S_PCI_CFG_PCIEC; + u32 cur_link_speed, link_speed; + u8 ltssm_state_l0 = 0xc; + int ret; + u16 ls; + + /* + * According to the RZ/G3S HW manual (Rev.1.10, section 34.6.3 Caution + * when Changing the Speed Spontaneously) link speed change can be done + * only when the LTSSM is in L0. + */ + ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT1, tmp, + FIELD_GET(RZG3S_PCI_PCSTAT1_LTSSM_STATE, tmp) == ltssm_state_l0, + PCIE_LINK_WAIT_SLEEP_MS * MILLI, + PCIE_LINK_WAIT_SLEEP_MS * MILLI * + PCIE_LINK_WAIT_MAX_RETRIES); + if (ret) + return ret; + + ls = readw_relaxed(host->pcie + pcie_cap + PCI_EXP_LNKSTA); + cs2 = readl_relaxed(host->axi + RZG3S_PCI_PCSTAT2); + + switch (pcie_link_speed[host->max_link_speed]) { + case PCIE_SPEED_5_0GT: + max_supported_link_speeds = GENMASK(PCI_EXP_LNKSTA_CLS_5_0GB - 1, 0); + link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT; + break; + default: + /* Should not happen */ + return -EINVAL; + } + + cur_link_speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, ls); + remote_supported_link_speeds = FIELD_GET(RZG3S_PCI_PCSTAT2_SDRIRE, cs2); + /* Drop reserved bits */ + remote_supported_link_speeds &= max_supported_link_speeds; + + /* + * Return if max link speed is already set or the connected device + * doesn't support it. + */ + if (cur_link_speed == host->max_link_speed || + remote_supported_link_speeds != max_supported_link_speeds) + return 0; + + /* Set target Link speed */ + rzg3s_pcie_update_bits(host->pcie, pcie_cap + PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS, + FIELD_PREP(PCI_EXP_LNKCTL2_TLS, link_speed)); + + /* Request link speed change */ + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PCCTRL2, + RZG3S_PCI_PCCTRL2_LS_CHG_REQ | + RZG3S_PCI_PCCTRL2_LS_CHG, + RZG3S_PCI_PCCTRL2_LS_CHG_REQ | + FIELD_PREP(RZG3S_PCI_PCCTRL2_LS_CHG, + link_speed - 1)); + + ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT2, cs2, + (cs2 & RZG3S_PCI_PCSTAT2_LS_CHG_DONE), + PCIE_LINK_WAIT_SLEEP_MS * MILLI, + PCIE_LINK_WAIT_SLEEP_MS * MILLI * + PCIE_LINK_WAIT_MAX_RETRIES); + + /* + * According to the RZ/G3S HW manual (Rev.1.10, section 34.6.3 Caution + * when Changing the Speed Spontaneously) the PCI_PCCTRL2_LS_CHG_REQ + * should be de-asserted after checking for PCI_PCSTAT2_LS_CHG_DONE. 
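+ * LS_CHG_REQ is therefore cleared below unconditionally, whether or not + * the poll for LS_CHG_DONE timed out.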
+ */ + rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PCCTRL2, + RZG3S_PCI_PCCTRL2_LS_CHG_REQ, 0); + + return ret; +} + +static int rzg3s_pcie_config_init(struct rzg3s_pcie_host *host) +{ + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); + struct resource_entry *ft; + struct resource *bus; + u8 subordinate_bus; + u8 secondary_bus; + u8 primary_bus; + + ft = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (!ft) + return -ENODEV; + + bus = ft->res; + primary_bus = bus->start; + secondary_bus = bus->start + 1; + subordinate_bus = bus->end; + + /* Enable access control to the CFGU */ + writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN, + host->axi + RZG3S_PCI_PERM); + + /* The HW manual recommends writing 0xffffffff on initialization */ + writel_relaxed(0xffffffff, host->pcie + RZG3S_PCI_CFG_BARMSK00L); + writel_relaxed(0xffffffff, host->pcie + RZG3S_PCI_CFG_BARMSK00U); + + /* Update bus info */ + writeb_relaxed(primary_bus, host->pcie + PCI_PRIMARY_BUS); + writeb_relaxed(secondary_bus, host->pcie + PCI_SECONDARY_BUS); + writeb_relaxed(subordinate_bus, host->pcie + PCI_SUBORDINATE_BUS); + + /* Disable access control to the CFGU */ + writel_relaxed(0, host->axi + RZG3S_PCI_PERM); + + return 0; +} + +static void rzg3s_pcie_irq_init(struct rzg3s_pcie_host *host) +{ + /* + * According to the HW manual of the RZ/G3S (Rev.1.10, sections + * corresponding to all registers written with ~0U), the hardware + * ignores values written to unused bits. Writing ~0U to these registers + * should be safe. + */ + + /* Clear the link state and PM transitions */ + writel_relaxed(RZG3S_PCI_PEIS0_DL_UPDOWN | + RZG3S_PCI_PEIS0_RX_DLLP_PM_ENTER, + host->axi + RZG3S_PCI_PEIS0); + + /* Disable all interrupts */ + writel_relaxed(0, host->axi + RZG3S_PCI_PEIE0); + + /* Clear all parity and ECC error interrupts */ + writel_relaxed(~0U, host->axi + RZG3S_PCI_PEIS1); + + /* Disable all parity and ECC error interrupts */ + writel_relaxed(0, host->axi + RZG3S_PCI_PEIE1); + + /* Clear all AXI master error interrupts */ + writel_relaxed(~0U, host->axi + RZG3S_PCI_AMEIS); + + /* Clear all AXI slave error interrupts */ + writel_relaxed(~0U, host->axi + RZG3S_PCI_ASEIS1); + + /* Clear all message receive interrupts */ + writel_relaxed(~0U, host->axi + RZG3S_PCI_MSGRCVIS); +} + +static int rzg3s_pcie_power_resets_deassert(struct rzg3s_pcie_host *host) +{ + const struct rzg3s_pcie_soc_data *data = host->data; + + /* + * According to the RZ/G3S HW manual (Rev.1.10, section + * 34.5.1.2 De-asserting the Reset) the PCIe IP needs to wait 5ms from + * power on to the de-assertion of reset.
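+ * Hence the fsleep(5000) call below, before the power resets are + * de-asserted.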
+ */ + fsleep(5000); + return reset_control_bulk_deassert(data->num_power_resets, + host->power_resets); +} + +static int rzg3s_pcie_resets_prepare_and_get(struct rzg3s_pcie_host *host) +{ + const struct rzg3s_pcie_soc_data *data = host->data; + unsigned int i; + int ret; + + host->power_resets = devm_kmalloc_array(host->dev, + data->num_power_resets, + sizeof(*host->power_resets), + GFP_KERNEL); + if (!host->power_resets) + return -ENOMEM; + + for (i = 0; i < data->num_power_resets; i++) + host->power_resets[i].id = data->power_resets[i]; + + host->cfg_resets = devm_kmalloc_array(host->dev, + data->num_cfg_resets, + sizeof(*host->cfg_resets), + GFP_KERNEL); + if (!host->cfg_resets) + return -ENOMEM; + + for (i = 0; i < data->num_cfg_resets; i++) + host->cfg_resets[i].id = data->cfg_resets[i]; + + ret = devm_reset_control_bulk_get_exclusive(host->dev, + data->num_power_resets, + host->power_resets); + if (ret) + return ret; + + return devm_reset_control_bulk_get_exclusive(host->dev, + data->num_cfg_resets, + host->cfg_resets); +} + +static int rzg3s_pcie_host_parse_port(struct rzg3s_pcie_host *host) +{ + struct device_node *of_port = of_get_next_child(host->dev->of_node, NULL); + struct rzg3s_pcie_port *port = &host->port; + int ret; + + ret = of_property_read_u32(of_port, "vendor-id", &port->vendor_id); + if (ret) + return ret; + + ret = of_property_read_u32(of_port, "device-id", &port->device_id); + if (ret) + return ret; + + port->refclk = of_clk_get_by_name(of_port, "ref"); + if (IS_ERR(port->refclk)) + return PTR_ERR(port->refclk); + + return 0; +} + +static int rzg3s_pcie_host_init_port(struct rzg3s_pcie_host *host) +{ + struct rzg3s_pcie_port *port = &host->port; + struct device *dev = host->dev; + int ret; + + /* Enable access control to the CFGU */ + writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN, + host->axi + RZG3S_PCI_PERM); + + /* Update vendor ID and device ID */ + writew_relaxed(port->vendor_id, host->pcie + PCI_VENDOR_ID); + writew_relaxed(port->device_id, host->pcie + PCI_DEVICE_ID); + + /* Disable access control to the CFGU */ + writel_relaxed(0, host->axi + RZG3S_PCI_PERM); + + ret = clk_prepare_enable(port->refclk); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable refclk!\n"); + + /* Set the PHY, if any */ + if (host->data->init_phy) { + ret = host->data->init_phy(host); + if (ret) { + dev_err_probe(dev, ret, "Failed to set the PHY!\n"); + goto refclk_disable; + } + } + + return 0; + +refclk_disable: + clk_disable_unprepare(port->refclk); + return ret; +} + +static int rzg3s_pcie_host_init(struct rzg3s_pcie_host *host) +{ + u32 val; + int ret; + + /* Initialize the PCIe related registers */ + ret = rzg3s_pcie_config_init(host); + if (ret) + return ret; + + ret = rzg3s_pcie_host_init_port(host); + if (ret) + return ret; + + /* Initialize the interrupts */ + rzg3s_pcie_irq_init(host); + + ret = reset_control_bulk_deassert(host->data->num_cfg_resets, + host->cfg_resets); + if (ret) + goto disable_port_refclk; + + /* Wait for link up */ + ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT1, val, + !(val & RZG3S_PCI_PCSTAT1_DL_DOWN_STS), + PCIE_LINK_WAIT_SLEEP_MS * MILLI, + PCIE_LINK_WAIT_SLEEP_MS * MILLI * + PCIE_LINK_WAIT_MAX_RETRIES); + if (ret) + goto cfg_resets_deassert; + + val = readl_relaxed(host->axi + RZG3S_PCI_PCSTAT2); + dev_info(host->dev, "PCIe link status [0x%x]\n", val); + + return 0; + +cfg_resets_deassert: + reset_control_bulk_assert(host->data->num_cfg_resets, + host->cfg_resets); +disable_port_refclk: + 
clk_disable_unprepare(host->port.refclk); + return ret; +} + +static void rzg3s_pcie_set_inbound_window(struct rzg3s_pcie_host *host, + u64 cpu_addr, u64 pci_addr, u64 size, + int id) +{ + /* Set CPU window base address */ + writel_relaxed(upper_32_bits(cpu_addr), + host->axi + RZG3S_PCI_ADESTU(id)); + writel_relaxed(lower_32_bits(cpu_addr), + host->axi + RZG3S_PCI_ADESTL(id)); + + /* Set window size */ + writel_relaxed(upper_32_bits(size), host->axi + RZG3S_PCI_AWMASKU(id)); + writel_relaxed(lower_32_bits(size), host->axi + RZG3S_PCI_AWMASKL(id)); + + /* Set PCIe window base address and enable the window */ + writel_relaxed(upper_32_bits(pci_addr), + host->axi + RZG3S_PCI_AWBASEU(id)); + writel_relaxed(lower_32_bits(pci_addr) | RZG3S_PCI_AWBASEL_WIN_ENA, + host->axi + RZG3S_PCI_AWBASEL(id)); +} + +static int rzg3s_pcie_set_inbound_windows(struct rzg3s_pcie_host *host, + struct resource_entry *entry, + int *index) +{ + u64 pci_addr = entry->res->start - entry->offset; + u64 cpu_addr = entry->res->start; + u64 cpu_end = entry->res->end; + u64 size_id = 0; + int id = *index; + u64 size; + + while (cpu_addr < cpu_end) { + if (id >= RZG3S_MAX_WINDOWS) + return dev_err_probe(host->dev, -ENOSPC, + "Failed to map inbound window for resource (%s)\n", + entry->res->name); + + size = resource_size(entry->res) - size_id; + + /* + * According to the RZ/G3S HW manual (Rev.1.10, + * section 34.3.1.71 AXI Window Mask (Lower) Registers) the min + * size is 4K. + */ + size = max(size, SZ_4K); + + /* + * According to the RZ/G3S HW manual (Rev.1.10, sections: + * - 34.3.1.69 AXI Window Base (Lower) Registers + * - 34.3.1.71 AXI Window Mask (Lower) Registers + * - 34.3.1.73 AXI Destination (Lower) Registers) + * the CPU addr, PCIe addr, and size should be 4K aligned and a + * power of 2. + */ + size = ALIGN(size, SZ_4K); + size = roundup_pow_of_two(size); + + cpu_addr = ALIGN(cpu_addr, SZ_4K); + pci_addr = ALIGN(pci_addr, SZ_4K); + + /* + * According to the RZ/G3S HW manual (Rev.1.10, section + * 34.3.1.71 AXI Window Mask (Lower) Registers) the HW expects + * the lower 12 bits to be 0xfff. Subtract 1 from size for this. + */ + rzg3s_pcie_set_inbound_window(host, cpu_addr, pci_addr, + size - 1, id); + + pci_addr += size; + cpu_addr += size; + size_id = size; + id++; + } + *index = id; + + return 0; +} + +static int rzg3s_pcie_parse_map_dma_ranges(struct rzg3s_pcie_host *host) +{ + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); + struct resource_entry *entry; + int i = 0, ret; + + resource_list_for_each_entry(entry, &bridge->dma_ranges) { + ret = rzg3s_pcie_set_inbound_windows(host, entry, &i); + if (ret) + return ret; + } + + return 0; +} + +static void rzg3s_pcie_set_outbound_window(struct rzg3s_pcie_host *host, + struct resource_entry *win, int id) +{ + struct resource *res = win->res; + resource_size_t size = resource_size(res); + resource_size_t res_start; + + if (res->flags & IORESOURCE_IO) + res_start = pci_pio_to_address(res->start) - win->offset; + else + res_start = res->start - win->offset; + + /* + * According to the RZ/G3S HW manual (Rev.1.10, section 34.3.1.75 PCIe + * Window Base (Lower) Registers) the window base address needs to be + * 4K aligned.
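+ * + * Illustrative example (values not from the patch): a 16 MiB window at + * 0x30000000 is programmed with base 0x30000000 and mask + * roundup_pow_of_two(size) - 1 = 0x00ffffff.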
+ */ + res_start = ALIGN(res_start, SZ_4K); + + size = ALIGN(size, SZ_4K); + size = roundup_pow_of_two(size) - 1; + + /* Set PCIe destination */ + writel_relaxed(upper_32_bits(res_start), + host->axi + RZG3S_PCI_PDESTU(id)); + writel_relaxed(lower_32_bits(res_start), + host->axi + RZG3S_PCI_PDESTL(id)); + + /* Set PCIe window mask */ + writel_relaxed(upper_32_bits(size), host->axi + RZG3S_PCI_PWMASKU(id)); + writel_relaxed(lower_32_bits(size), host->axi + RZG3S_PCI_PWMASKL(id)); + + /* Set PCIe window base and enable the window */ + writel_relaxed(upper_32_bits(res_start), + host->axi + RZG3S_PCI_PWBASEU(id)); + writel_relaxed(lower_32_bits(res_start) | RZG3S_PCI_PWBASEL_ENA, + host->axi + RZG3S_PCI_PWBASEL(id)); +} + +static int rzg3s_pcie_parse_map_ranges(struct rzg3s_pcie_host *host) +{ + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); + struct resource_entry *win; + int i = 0; + + resource_list_for_each_entry(win, &bridge->windows) { + struct resource *res = win->res; + + if (i >= RZG3S_MAX_WINDOWS) + return dev_err_probe(host->dev, -ENOSPC, + "Failed to map outbound window for resource (%s)\n", + res->name); + + if (!res->flags) + continue; + + switch (resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: + rzg3s_pcie_set_outbound_window(host, win, i); + i++; + break; + } + } + + return 0; +} + +static int rzg3s_soc_pcie_init_phy(struct rzg3s_pcie_host *host) +{ + static const u32 xcfgd_settings[RZG3S_PCI_PHY_XCFGD_NUM] = { + [8] = 0xe0006801, 0x007f7e30, 0x183e0000, 0x978ff500, + 0xec000000, 0x009f1400, 0x0000d009, + [17] = 0x78000000, + [19] = 0x00880000, 0x000005c0, 0x07000000, 0x00780920, + 0xc9400ce2, 0x90000c0c, 0x000c1414, 0x00005034, + 0x00006000, 0x00000001, + }; + static const u32 xcfga_cmn_settings[RZG3S_PCI_PHY_XCFGA_CMN_NUM] = { + 0x00000d10, 0x08310100, 0x00c21404, 0x013c0010, 0x01874440, + 0x1a216082, 0x00103440, 0x00000080, 0x00000010, 0x0c1000c1, + 0x1000c100, 0x0222000c, 0x00640019, 0x00a00028, 0x01d11228, + 0x0201001d, + }; + static const u32 xcfga_rx_settings[RZG3S_PCI_PHY_XCFGA_RX_NUM] = { + 0x07d55000, 0x030e3f00, 0x00000288, 0x102c5880, 0x0000000b, + 0x04141441, 0x00641641, 0x00d63d63, 0x00641641, 0x01970377, + 0x00190287, 0x00190028, 0x00000028, + }; + unsigned int i; + + /* + * Enable access permission for physical layer control and status + * registers. + */ + writel_relaxed(RZG3S_PCI_PERM_PIPE_PHY_REG_EN, + host->axi + RZG3S_PCI_PERM); + + for (i = 0; i < RZG3S_PCI_PHY_XCFGD_NUM; i++) { + writel_relaxed(xcfgd_settings[i], + host->axi + RZG3S_PCI_PHY_XCFGD(i)); + } + + for (i = 0; i < RZG3S_PCI_PHY_XCFGA_CMN_NUM; i++) { + writel_relaxed(xcfga_cmn_settings[i], + host->axi + RZG3S_PCI_PHY_XCFGA_CMN(i)); + } + + for (i = 0; i < RZG3S_PCI_PHY_XCFGA_RX_NUM; i++) { + writel_relaxed(xcfga_rx_settings[i], + host->axi + RZG3S_PCI_PHY_XCFGA_RX(i)); + } + + writel_relaxed(0x107, host->axi + RZG3S_PCI_PHY_XCFGA_TX); + + /* Select PHY settings values */ + writel_relaxed(RZG3S_PCI_PHY_XCFG_CTRL_PHYREG_SEL, + host->axi + RZG3S_PCI_PHY_XCFG_CTRL); + + /* + * Disable access permission for physical layer control and status + * registers. 
+ */ + writel_relaxed(0, host->axi + RZG3S_PCI_PERM); + + return 0; +} + +static int +rzg3s_pcie_host_setup(struct rzg3s_pcie_host *host, + int (*init_irqdomain)(struct rzg3s_pcie_host *host), + void (*teardown_irqdomain)(struct rzg3s_pcie_host *host)) +{ + struct device *dev = host->dev; + int ret; + + /* Set inbound windows */ + ret = rzg3s_pcie_parse_map_dma_ranges(host); + if (ret) + return dev_err_probe(dev, ret, + "Failed to set inbound windows!\n"); + + /* Set outbound windows */ + ret = rzg3s_pcie_parse_map_ranges(host); + if (ret) + return dev_err_probe(dev, ret, + "Failed to set outbound windows!\n"); + + ret = init_irqdomain(host); + if (ret) + return dev_err_probe(dev, ret, "Failed to init IRQ domain\n"); + + ret = rzg3s_pcie_host_init(host); + if (ret) { + dev_err_probe(dev, ret, "Failed to initialize the HW!\n"); + goto teardown_irqdomain; + } + + ret = rzg3s_pcie_set_max_link_speed(host); + if (ret) + dev_info(dev, "Failed to set max link speed\n"); + + msleep(PCIE_RESET_CONFIG_WAIT_MS); + + return 0; + +teardown_irqdomain: + teardown_irqdomain(host); + + return ret; +} + +static int rzg3s_pcie_probe(struct platform_device *pdev) +{ + struct pci_host_bridge *bridge; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *sysc_np __free(device_node) = + of_parse_phandle(np, "renesas,sysc", 0); + struct rzg3s_pcie_host *host; + int ret; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host)); + if (!bridge) + return -ENOMEM; + + host = pci_host_bridge_priv(bridge); + host->dev = dev; + host->data = device_get_match_data(dev); + platform_set_drvdata(pdev, host); + + host->axi = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->axi)) + return PTR_ERR(host->axi); + host->pcie = host->axi + RZG3S_PCI_CFG_BASE; + + host->max_link_speed = of_pci_get_max_link_speed(np); + if (host->max_link_speed < 0) + host->max_link_speed = 2; + + ret = rzg3s_pcie_host_parse_port(host); + if (ret) + return ret; + + host->sysc = syscon_node_to_regmap(sysc_np); + if (IS_ERR(host->sysc)) { + ret = PTR_ERR(host->sysc); + goto port_refclk_put; + } + + ret = regmap_update_bits(host->sysc, RZG3S_SYS_PCIE_RST_RSM_B, + RZG3S_SYS_PCIE_RST_RSM_B_MASK, + FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 1)); + if (ret) + goto port_refclk_put; + + ret = rzg3s_pcie_resets_prepare_and_get(host); + if (ret) + goto sysc_signal_restore; + + ret = rzg3s_pcie_power_resets_deassert(host); + if (ret) + goto sysc_signal_restore; + + pm_runtime_enable(dev); + + /* + * Controller clocks are part of a clock power domain. Enable them + * through runtime PM. + */ + ret = pm_runtime_resume_and_get(dev); + if (ret) + goto rpm_disable; + + raw_spin_lock_init(&host->hw_lock); + + ret = rzg3s_pcie_host_setup(host, rzg3s_pcie_init_irqdomain, + rzg3s_pcie_teardown_irqdomain); + if (ret) + goto rpm_put; + + bridge->sysdata = host; + bridge->ops = &rzg3s_pcie_root_ops; + bridge->child_ops = &rzg3s_pcie_child_ops; + ret = pci_host_probe(bridge); + if (ret) + goto host_probe_teardown; + + return 0; + +host_probe_teardown: + rzg3s_pcie_teardown_irqdomain(host); + reset_control_bulk_deassert(host->data->num_cfg_resets, + host->cfg_resets); +rpm_put: + pm_runtime_put_sync(dev); +rpm_disable: + pm_runtime_disable(dev); + reset_control_bulk_assert(host->data->num_power_resets, + host->power_resets); +sysc_signal_restore: + /* + * The SYSC RST_RSM_B signal needs to be asserted before turning off + * the power to the PHY.
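+ * This mirrors the resume path, which sets RST_RSM_B back to 1 before + * de-asserting the power resets.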
+ */ + regmap_update_bits(host->sysc, RZG3S_SYS_PCIE_RST_RSM_B, + RZG3S_SYS_PCIE_RST_RSM_B_MASK, + FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0)); +port_refclk_put: + clk_put(host->port.refclk); + + return ret; +} + +static int rzg3s_pcie_suspend_noirq(struct device *dev) +{ + struct rzg3s_pcie_host *host = dev_get_drvdata(dev); + const struct rzg3s_pcie_soc_data *data = host->data; + struct rzg3s_pcie_port *port = &host->port; + struct regmap *sysc = host->sysc; + int ret; + + ret = pm_runtime_put_sync(dev); + if (ret) + return ret; + + clk_disable_unprepare(port->refclk); + + ret = reset_control_bulk_assert(data->num_power_resets, + host->power_resets); + if (ret) + goto refclk_restore; + + ret = reset_control_bulk_assert(data->num_cfg_resets, + host->cfg_resets); + if (ret) + goto power_resets_restore; + + ret = regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B, + RZG3S_SYS_PCIE_RST_RSM_B_MASK, + FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0)); + if (ret) + goto cfg_resets_restore; + + return 0; + + /* Restore the previous state if any error happens */ +cfg_resets_restore: + reset_control_bulk_deassert(data->num_cfg_resets, + host->cfg_resets); +power_resets_restore: + reset_control_bulk_deassert(data->num_power_resets, + host->power_resets); +refclk_restore: + clk_prepare_enable(port->refclk); + pm_runtime_resume_and_get(dev); + return ret; +} + +static int rzg3s_pcie_resume_noirq(struct device *dev) +{ + struct rzg3s_pcie_host *host = dev_get_drvdata(dev); + const struct rzg3s_pcie_soc_data *data = host->data; + struct regmap *sysc = host->sysc; + int ret; + + ret = regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B, + RZG3S_SYS_PCIE_RST_RSM_B_MASK, + FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 1)); + if (ret) + return ret; + + ret = rzg3s_pcie_power_resets_deassert(host); + if (ret) + goto assert_rst_rsm_b; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + goto assert_power_resets; + + ret = rzg3s_pcie_host_setup(host, rzg3s_pcie_msi_hw_setup, + rzg3s_pcie_msi_hw_teardown); + if (ret) + goto rpm_put; + + return 0; + + /* + * If any error happens there is no way to recover the IP. Put it in the + * lowest possible power state. 
+ */ +rpm_put: + pm_runtime_put_sync(dev); +assert_power_resets: + reset_control_bulk_assert(data->num_power_resets, + host->power_resets); +assert_rst_rsm_b: + regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B, + RZG3S_SYS_PCIE_RST_RSM_B_MASK, + FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0)); + return ret; +} + +static const struct dev_pm_ops rzg3s_pcie_pm_ops = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(rzg3s_pcie_suspend_noirq, + rzg3s_pcie_resume_noirq) +}; + +static const char * const rzg3s_soc_power_resets[] = { + "aresetn", "rst_cfg_b", "rst_load_b", +}; + +static const char * const rzg3s_soc_cfg_resets[] = { + "rst_b", "rst_ps_b", "rst_gp_b", "rst_rsm_b", +}; + +static const struct rzg3s_pcie_soc_data rzg3s_soc_data = { + .power_resets = rzg3s_soc_power_resets, + .num_power_resets = ARRAY_SIZE(rzg3s_soc_power_resets), + .cfg_resets = rzg3s_soc_cfg_resets, + .num_cfg_resets = ARRAY_SIZE(rzg3s_soc_cfg_resets), + .init_phy = rzg3s_soc_pcie_init_phy, +}; + +static const struct of_device_id rzg3s_pcie_of_match[] = { + { + .compatible = "renesas,r9a08g045-pcie", + .data = &rzg3s_soc_data, + }, + {} +}; + +static struct platform_driver rzg3s_pcie_driver = { + .driver = { + .name = "rzg3s-pcie-host", + .of_match_table = rzg3s_pcie_of_match, + .pm = pm_ptr(&rzg3s_pcie_pm_ops), + .suppress_bind_attrs = true, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .probe = rzg3s_pcie_probe, +}; +builtin_platform_driver(rzg3s_pcie_driver); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index b4b62b9ccc45..ec6afc38e898 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -578,22 +578,6 @@ static void vmd_detach_resources(struct vmd_dev *vmd) vmd->dev->resource[VMD_MEMBAR2].child = NULL; } -/* - * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. - * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower - * 16 bits are the PCI Segment Group (domain) number. Other bits are - * currently reserved. - */ -static int vmd_find_free_domain(void) -{ - int domain = 0xffff; - struct pci_bus *bus = NULL; - - while ((bus = pci_find_next_bus(bus)) != NULL) - domain = max_t(int, domain, pci_domain_nr(bus)); - return domain + 1; -} - static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint, resource_size_t *offset1, resource_size_t *offset2) @@ -878,13 +862,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) .parent = res, }; - sd->vmd_dev = vmd->dev; - sd->domain = vmd_find_free_domain(); - if (sd->domain < 0) - return sd->domain; - - sd->node = pcibus_to_node(vmd->dev->bus); - /* * Currently MSI remapping must be enabled in guest passthrough mode * due to some missing interrupt remapping plumbing. This is probably @@ -910,9 +887,24 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); + sd->vmd_dev = vmd->dev; + + /* + * Emulated domains start at 0x10000 to not clash with ACPI _SEG + * domains. Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of + * which the lower 16 bits are the PCI Segment Group (domain) number. + * Other bits are currently reserved. 
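+ * + * The domain number is allocated below with pci_bus_find_emul_domain_nr() + * and released again via pci_bus_release_emul_domain_nr(), both in the + * error path here and in vmd_remove().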
+ */ + sd->domain = pci_bus_find_emul_domain_nr(0, 0x10000, INT_MAX); + if (sd->domain < 0) + return sd->domain; + + sd->node = pcibus_to_node(vmd->dev->bus); + vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start, &vmd_ops, sd, &resources); if (!vmd->bus) { + pci_bus_release_emul_domain_nr(sd->domain); pci_free_resource_list(&resources); vmd_remove_irq_domain(vmd); return -ENODEV; @@ -1005,6 +997,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENOMEM; vmd->dev = dev; + vmd->sysdata.domain = PCI_DOMAIN_NR_NOT_SET; vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL); if (vmd->instance < 0) return vmd->instance; @@ -1070,6 +1063,7 @@ static void vmd_remove(struct pci_dev *dev) vmd_detach_resources(vmd); vmd_remove_irq_domain(vmd); ida_free(&vmd_instance_ida, vmd->instance); + pci_bus_release_emul_domain_nr(vmd->sysdata.domain); } static void vmd_shutdown(struct pci_dev *dev) |
