-rw-r--r--  drivers/cxl/acpi.c                    |   7
-rw-r--r--  drivers/cxl/core/cdat.c               |  25
-rw-r--r--  drivers/cxl/core/core.h               |   5
-rw-r--r--  drivers/cxl/core/hdm.c                | 105
-rw-r--r--  drivers/cxl/core/pci.c                |  89
-rw-r--r--  drivers/cxl/core/port.c               | 318
-rw-r--r--  drivers/cxl/core/region.c             |   4
-rw-r--r--  drivers/cxl/cxl.h                     |  43
-rw-r--r--  drivers/cxl/cxlpci.h                  |   2
-rw-r--r--  drivers/cxl/port.c                    |  47
-rw-r--r--  tools/testing/cxl/Kbuild              |   7
-rw-r--r--  tools/testing/cxl/cxl_core_exports.c  |  22
-rw-r--r--  tools/testing/cxl/exports.h           |  13
-rw-r--r--  tools/testing/cxl/test/cxl.c          | 115
-rw-r--r--  tools/testing/cxl/test/mock.c         |  96
-rw-r--r--  tools/testing/cxl/test/mock.h         |   9
16 files changed, 642 insertions(+), 265 deletions(-)
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index ff15dac3f294..d7a5539d07d4 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -401,7 +401,6 @@ DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
- int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_port *root_port = ctx->root_port;
struct cxl_cxims_context cxims_ctx;
struct device *dev = ctx->dev;
@@ -419,8 +418,6 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = eig_to_granularity(cfmws->granularity, &ig);
if (rc)
return rc;
- for (i = 0; i < ways; i++)
- target_map[i] = cfmws->interleave_targets[i];
struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
cfmws->base_hpa, cfmws->window_size, ctx->id++);
@@ -446,6 +443,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
.end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = ways;
+ for (i = 0; i < ways; i++)
+ cxld->target_map[i] = cfmws->interleave_targets[i];
/*
* Minimize the x1 granularity to advertise support for any
* valid region granularity
@@ -484,7 +483,7 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
cxlrd->ops->spa_to_hpa = cxl_apply_xor_maps;
}
- rc = cxl_decoder_add(cxld, target_map);
+ rc = cxl_decoder_add(cxld);
if (rc)
return rc;
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index bca1ec279651..c4bd6e8a0cf0 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -338,7 +338,7 @@ static int match_cxlrd_hb(struct device *dev, void *data)
guard(rwsem_read)(&cxl_rwsem.region);
for (int i = 0; i < cxlsd->nr_targets; i++) {
- if (host_bridge == cxlsd->target[i]->dport_dev)
+ if (cxlsd->target[i] && host_bridge == cxlsd->target[i]->dport_dev)
return 1;
}
@@ -440,8 +440,8 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
} *tbl = (struct acpi_cdat_sslbis_table *)header;
int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
struct acpi_cdat_sslbis *sslbis;
- struct cxl_port *port = arg;
- struct device *dev = &port->dev;
+ struct cxl_dport *dport = arg;
+ struct device *dev = &dport->port->dev;
int remain, entries, i;
u16 len;
@@ -467,8 +467,6 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
__le64 le_base;
__le16 le_val;
- struct cxl_dport *dport;
- unsigned long index;
u16 dsp_id;
u64 val;
@@ -499,28 +497,27 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
sslbis->data_type);
- xa_for_each(&port->dports, index, dport) {
- if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id) {
- cxl_access_coordinate_set(dport->coord,
- sslbis->data_type,
- val);
- }
+ if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
+ sslbis->data_type, val);
+ return 0;
}
}
return 0;
}
-void cxl_switch_parse_cdat(struct cxl_port *port)
+void cxl_switch_parse_cdat(struct cxl_dport *dport)
{
+ struct cxl_port *port = dport->port;
int rc;
if (!port->cdat.table)
return;
rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
- port, port->cdat.table, port->cdat.length);
+ dport, port->cdat.table, port->cdat.length);
rc = cdat_table_parse_output(rc);
if (rc)
dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 5707cd60a8eb..1fb66132b777 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -148,6 +148,11 @@ int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct cxl_dport *dport);
+struct cxl_hdm;
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info);
+int cxl_port_get_possible_dports(struct cxl_port *port);
+
#ifdef CONFIG_CXL_FEATURES
struct cxl_feat_entry *
cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 37176c0a781f..d3a094ca01ad 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -21,12 +21,11 @@ struct cxl_rwsem cxl_rwsem = {
.dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
};
-static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map)
+static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
int rc;
- rc = cxl_decoder_add_locked(cxld, target_map);
+ rc = cxl_decoder_add_locked(cxld);
if (rc) {
put_device(&cxld->dev);
dev_err(&port->dev, "Failed to add decoder\n");
@@ -50,12 +49,9 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
* are claimed and passed to the single dport. Disable the range until the first
* CXL region is enumerated / activated.
*/
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
struct cxl_switch_decoder *cxlsd;
- struct cxl_dport *dport = NULL;
- int single_port_map[1];
- unsigned long index;
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
/*
@@ -71,13 +67,8 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport)
- break;
- single_port_map[0] = dport->port_id;
-
- return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL");
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
@@ -147,8 +138,8 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
* @port: cxl_port to map
* @info: cached DVSEC range register info
*/
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
+static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
{
struct cxl_register_map *reg_map = &port->reg_map;
struct device *dev = &port->dev;
@@ -203,7 +194,6 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
return cxlhdm;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL");
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
@@ -984,7 +974,7 @@ static int cxl_setup_hdm_decoder_from_dvsec(
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which,
+ void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
struct cxl_endpoint_decoder *cxled = NULL;
@@ -1103,7 +1093,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
target_list.value = (hi << 32) + lo;
for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ cxld->target_map[i] = target_list.target_id[i];
return 0;
}
@@ -1168,8 +1158,8 @@ static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
* @cxlhdm: Structure to populate with HDM capabilities
* @info: cached DVSEC range register info
*/
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
+static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
@@ -1179,7 +1169,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxl_settle_decoders(cxlhdm);
for (i = 0; i < cxlhdm->decoder_count; i++) {
- int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
@@ -1207,8 +1196,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
- &dpa_base, info);
+ rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
if (rc) {
dev_warn(&port->dev,
"Failed to initialize decoder%d.%d\n",
@@ -1216,7 +1204,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
put_device(&cxld->dev);
return rc;
}
- rc = add_hdm_decoder(port, cxld, target_map);
+ rc = add_hdm_decoder(port, cxld);
if (rc) {
dev_warn(&port->dev,
"Failed to add decoder%d.%d\n", port->id, i);
@@ -1226,4 +1214,71 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL");
+
+/**
+ * __devm_cxl_switch_port_decoders_setup - allocate and set up switch decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ cxlhdm = devm_cxl_setup_hdm(port, NULL);
+ if (!IS_ERR(cxlhdm))
+ return devm_cxl_enumerate_decoders(cxlhdm, NULL);
+
+ if (PTR_ERR(cxlhdm) != -ENODEV) {
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ if (cxl_port_get_possible_dports(port) == 1) {
+ dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
+ return devm_cxl_add_passthrough_decoder(port);
+ }
+
+ dev_err(&port->dev, "HDM decoder capability not found\n");
+ return -ENXIO;
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");
+
+/**
+ * devm_cxl_endpoint_decoders_setup - allocate and set up endpoint decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_endpoint_dvsec_info info = { .port = port };
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_hdm *cxlhdm;
+ int rc;
+
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
+ if (rc < 0)
+ return rc;
+
+ cxlhdm = devm_cxl_setup_hdm(port, &info);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) == -ENODEV)
+ dev_err(&port->dev, "HDM decoder registers not found\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enumerate_decoders(cxlhdm, &info);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index b50551601c2e..18825e1505d6 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -24,6 +24,53 @@ static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
+static int pci_get_port_num(struct pci_dev *pdev)
+{
+ u32 lnkcap;
+ int type;
+
+ type = pci_pcie_type(pdev);
+ if (type != PCI_EXP_TYPE_DOWNSTREAM && type != PCI_EXP_TYPE_ROOT_PORT)
+ return -EINVAL;
+
+ if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENXIO;
+
+ return FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+}
+
+/**
+ * __devm_cxl_add_dport_by_dev - allocate a dport by dport device
+ * @port: cxl_port that hosts the dport
+ * @dport_dev: 'struct device' of the dport
+ *
+ * Returns the allocated dport on success or ERR_PTR() of -errno on error
+ */
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_register_map map;
+ struct pci_dev *pdev;
+ int port_num, rc;
+
+ if (!dev_is_pci(dport_dev))
+ return ERR_PTR(-EINVAL);
+
+ pdev = to_pci_dev(dport_dev);
+ port_num = pci_get_port_num(pdev);
+ if (port_num < 0)
+ return ERR_PTR(port_num);
+
+ rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+ if (rc)
+ return ERR_PTR(rc);
+
+ device_lock_assert(&port->dev);
+ return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_add_dport_by_dev, "CXL");
+
struct cxl_walk_context {
struct pci_bus *bus;
struct cxl_port *port;
@@ -1169,3 +1216,45 @@ int cxl_gpf_port_setup(struct cxl_dport *dport)
return 0;
}
+
+static int count_dports(struct pci_dev *pdev, void *data)
+{
+ struct cxl_walk_context *ctx = data;
+ int type = pci_pcie_type(pdev);
+
+ if (pdev->bus != ctx->bus)
+ return 0;
+ if (!pci_is_pcie(pdev))
+ return 0;
+ if (type != ctx->type)
+ return 0;
+
+ ctx->count++;
+ return 0;
+}
+
+int cxl_port_get_possible_dports(struct cxl_port *port)
+{
+ struct pci_bus *bus = cxl_port_to_pci_bus(port);
+ struct cxl_walk_context ctx;
+ int type;
+
+ if (!bus) {
+ dev_err(&port->dev, "No PCI bus found for port %s\n",
+ dev_name(&port->dev));
+ return -ENXIO;
+ }
+
+ if (pci_is_root_bus(bus))
+ type = PCI_EXP_TYPE_ROOT_PORT;
+ else
+ type = PCI_EXP_TYPE_DOWNSTREAM;
+
+ ctx = (struct cxl_walk_context) {
+ .bus = bus,
+ .type = type,
+ };
+ pci_walk_bus(bus, count_dports, &ctx);
+
+ return ctx.count;
+}
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 8f36ff413f5d..d5f71eb1ade8 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -33,6 +33,15 @@
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
+/*
+ * The terminal device is NULL for PCI and @platform_bus
+ * for platform devices (cxl_test)
+ */
+static bool is_cxl_host_bridge(struct device *dev)
+{
+ return (!dev || dev == &platform_bus);
+}
+
int cxl_num_decoders_committed(struct cxl_port *port)
{
lockdep_assert_held(&cxl_rwsem.region);
@@ -741,6 +750,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
xa_init(&port->dports);
xa_init(&port->endpoints);
xa_init(&port->regions);
+ port->component_reg_phys = CXL_RESOURCE_NONE;
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -859,9 +869,7 @@ static int cxl_port_add(struct cxl_port *port,
if (rc)
return rc;
- rc = cxl_port_setup_regs(port, component_reg_phys);
- if (rc)
- return rc;
+ port->component_reg_phys = component_reg_phys;
} else {
rc = dev_set_name(dev, "root%d", port->id);
if (rc)
@@ -1192,6 +1200,18 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
cxl_debugfs_create_dport_dir(dport);
+ /*
+ * Set up the port registers when the first dport shows up. Having
+ * a dport also means that there is at least one active link.
+ */
+ if (port->nr_dports == 1 &&
+ port->component_reg_phys != CXL_RESOURCE_NONE) {
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc)
+ return ERR_PTR(rc);
+ port->component_reg_phys = CXL_RESOURCE_NONE;
+ }
+
return dport;
}
@@ -1349,21 +1369,6 @@ static struct cxl_port *find_cxl_port(struct device *dport_dev,
return port;
}
-static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev,
- struct cxl_dport **dport)
-{
- struct cxl_find_port_ctx ctx = {
- .dport_dev = dport_dev,
- .parent_port = parent_port,
- .dport = dport,
- };
- struct cxl_port *port;
-
- port = __find_cxl_port(&ctx);
- return port;
-}
-
/*
* All users of grandparent() are using it to walk PCIe-like switch port
* hierarchy. A PCIe switch is comprised of a bridge device representing the
@@ -1424,7 +1429,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, "CXL");
* through ->remove(). This "bottom-up" removal selectively removes individual
* child ports manually. This depends on devm_cxl_add_port() to not change its
* devm action registration order, and for dports to have already been
- * destroyed by reap_dports().
+ * destroyed by del_dports().
*/
static void delete_switch_port(struct cxl_port *port)
{
@@ -1433,18 +1438,24 @@ static void delete_switch_port(struct cxl_port *port)
devm_release_action(port->dev.parent, unregister_port, port);
}
-static void reap_dports(struct cxl_port *port)
+static void del_dport(struct cxl_dport *dport)
+{
+ struct cxl_port *port = dport->port;
+
+ devm_release_action(&port->dev, cxl_dport_unlink, dport);
+ devm_release_action(&port->dev, cxl_dport_remove, dport);
+ devm_kfree(&port->dev, dport);
+}
+
+static void del_dports(struct cxl_port *port)
{
struct cxl_dport *dport;
unsigned long index;
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport) {
- devm_release_action(&port->dev, cxl_dport_unlink, dport);
- devm_release_action(&port->dev, cxl_dport_remove, dport);
- devm_kfree(&port->dev, dport);
- }
+ xa_for_each(&port->dports, index, dport)
+ del_dport(dport);
}
struct detach_ctx {
@@ -1502,7 +1513,7 @@ static void cxl_detach_ep(void *data)
*/
died = true;
port->dead = true;
- reap_dports(port);
+ del_dports(port);
}
device_unlock(&port->dev);
@@ -1533,16 +1544,157 @@ static resource_size_t find_component_registers(struct device *dev)
return map.resource;
}
+static int match_port_by_uport(struct device *dev, const void *data)
+{
+ const struct device *uport_dev = data;
+ struct cxl_port *port;
+
+ if (!is_cxl_port(dev))
+ return 0;
+
+ port = to_cxl_port(dev);
+ return uport_dev == port->uport_dev;
+}
+
+/*
+ * Function takes a device reference on the port device. Caller should do a
+ * put_device() when done.
+ */
+static struct cxl_port *find_cxl_port_by_uport(struct device *uport_dev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&cxl_bus_type, NULL, uport_dev, match_port_by_uport);
+ if (dev)
+ return to_cxl_port(dev);
+ return NULL;
+}
+
+static int update_decoder_targets(struct device *dev, void *data)
+{
+ struct cxl_dport *dport = data;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int i;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+ cxld = &cxlsd->cxld;
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ for (i = 0; i < cxld->interleave_ways; i++) {
+ if (cxld->target_map[i] == dport->port_id) {
+ cxlsd->target[i] = dport;
+ dev_dbg(dev, "dport%d found in target list, index %d\n",
+ dport->port_id, i);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_FREE(del_cxl_dport, struct cxl_dport *, if (!IS_ERR_OR_NULL(_T)) del_dport(_T))
+static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+ int rc;
+
+ device_lock_assert(&port->dev);
+ if (!port->dev.driver)
+ return ERR_PTR(-ENXIO);
+
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (dport) {
+ dev_dbg(&port->dev, "dport%d:%s already exists\n",
+ dport->port_id, dev_name(dport_dev));
+ return ERR_PTR(-EBUSY);
+ }
+
+ struct cxl_dport *new_dport __free(del_cxl_dport) =
+ devm_cxl_add_dport_by_dev(port, dport_dev);
+ if (IS_ERR(new_dport))
+ return new_dport;
+
+ cxl_switch_parse_cdat(new_dport);
+
+ if (ida_is_empty(&port->decoder_ida)) {
+ rc = devm_cxl_switch_port_decoders_setup(port);
+ if (rc)
+ return ERR_PTR(rc);
+ dev_dbg(&port->dev, "first dport%d:%s added with decoders\n",
+ new_dport->port_id, dev_name(dport_dev));
+ return no_free_ptr(new_dport);
+ }
+
+ /* New dport added, update the decoder targets */
+ device_for_each_child(&port->dev, new_dport, update_decoder_targets);
+
+ dev_dbg(&port->dev, "dport%d:%s added\n", new_dport->port_id,
+ dev_name(dport_dev));
+
+ return no_free_ptr(new_dport);
+}
+
+static struct cxl_dport *devm_cxl_create_port(struct device *ep_dev,
+ struct cxl_port *parent_port,
+ struct cxl_dport *parent_dport,
+ struct device *uport_dev,
+ struct device *dport_dev)
+{
+ resource_size_t component_reg_phys;
+
+ device_lock_assert(&parent_port->dev);
+ if (!parent_port->dev.driver) {
+ dev_warn(ep_dev,
+ "port %s:%s:%s disabled, failed to enumerate CXL.mem\n",
+ dev_name(&parent_port->dev), dev_name(uport_dev),
+ dev_name(dport_dev));
+ }
+
+ struct cxl_port *port __free(put_cxl_port) =
+ find_cxl_port_by_uport(uport_dev);
+ if (!port) {
+ component_reg_phys = find_component_registers(uport_dev);
+ port = devm_cxl_add_port(&parent_port->dev, uport_dev,
+ component_reg_phys, parent_dport);
+ if (IS_ERR(port))
+ return ERR_CAST(port);
+
+ /*
+ * Retry to make sure a port is found. A port device
+ * reference is taken.
+ */
+ port = find_cxl_port_by_uport(uport_dev);
+ if (!port)
+ return ERR_PTR(-ENODEV);
+
+ dev_dbg(ep_dev, "created port %s:%s\n",
+ dev_name(&port->dev), dev_name(port->uport_dev));
+ } else {
+ /*
+ * Port was created right before this function was
+ * called. Signal the caller to deal with it.
+ */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ guard(device)(&port->dev);
+ return cxl_port_add_dport(port, dport_dev);
+}
+
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
struct device *uport_dev,
struct device *dport_dev)
{
struct device *dparent = grandparent(dport_dev);
struct cxl_dport *dport, *parent_dport;
- resource_size_t component_reg_phys;
int rc;
- if (!dparent) {
+ if (is_cxl_host_bridge(dparent)) {
/*
* The iteration reached the topology root without finding the
* CXL-root 'cxl_port' on a previous iteration, fail for now to
@@ -1554,42 +1706,31 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
}
struct cxl_port *parent_port __free(put_cxl_port) =
- find_cxl_port(dparent, &parent_dport);
+ find_cxl_port_by_uport(dparent->parent);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
}
- /*
- * Definition with __free() here to keep the sequence of
- * dereferencing the device of the port before the parent_port releasing.
- */
- struct cxl_port *port __free(put_cxl_port) = NULL;
scoped_guard(device, &parent_port->dev) {
- if (!parent_port->dev.driver) {
- dev_warn(&cxlmd->dev,
- "port %s:%s disabled, failed to enumerate CXL.mem\n",
- dev_name(&parent_port->dev), dev_name(uport_dev));
- return -ENXIO;
+ parent_dport = cxl_find_dport_by_dev(parent_port, dparent);
+ if (!parent_dport) {
+ parent_dport = cxl_port_add_dport(parent_port, dparent);
+ if (IS_ERR(parent_dport))
+ return PTR_ERR(parent_dport);
}
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port) {
- component_reg_phys = find_component_registers(uport_dev);
- port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_dport);
- if (IS_ERR(port))
- return PTR_ERR(port);
-
- /* retry find to pick up the new dport information */
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port)
- return -ENXIO;
+ dport = devm_cxl_create_port(&cxlmd->dev, parent_port,
+ parent_dport, uport_dev,
+ dport_dev);
+ if (IS_ERR(dport)) {
+ /* Port already exists, restart iteration */
+ if (PTR_ERR(dport) == -EAGAIN)
+ return 0;
+ return PTR_ERR(dport);
}
}
- dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport_dev));
rc = cxl_add_ep(dport, &cxlmd->dev);
if (rc == -EBUSY) {
/*
@@ -1602,6 +1743,25 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return rc;
}
+static struct cxl_dport *find_or_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+
+ device_lock_assert(&port->dev);
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (!dport) {
+ dport = cxl_port_add_dport(port, dport_dev);
+ if (IS_ERR(dport))
+ return dport;
+
+ /* New dport added, restart iteration */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return dport;
+}
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
struct device *dev = &cxlmd->dev;
@@ -1630,11 +1790,7 @@ retry:
struct device *uport_dev;
struct cxl_dport *dport;
- /*
- * The terminal "grandparent" in PCI is NULL and @platform_bus
- * for platform devices
- */
- if (!dport_dev || dport_dev == &platform_bus)
+ if (is_cxl_host_bridge(dport_dev))
return 0;
uport_dev = dport_dev->parent;
@@ -1648,12 +1804,26 @@ retry:
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
struct cxl_port *port __free(put_cxl_port) =
- find_cxl_port(dport_dev, &dport);
+ find_cxl_port_by_uport(uport_dev);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev),
dev_name(port->uport_dev));
+
+ /*
+ * A root port enumerated by cxl_acpi without a dport will
+ * have its dport added here.
+ */
+ scoped_guard(device, &port->dev) {
+ dport = find_or_add_dport(port, dport_dev);
+ if (IS_ERR(dport)) {
+ if (PTR_ERR(dport) == -EAGAIN)
+ goto retry;
+ return PTR_ERR(dport);
+ }
+ }
+
rc = cxl_add_ep(dport, &cxlmd->dev);
/*
@@ -1705,24 +1875,24 @@ struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL");
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
- struct cxl_port *port, int *target_map)
+ struct cxl_port *port)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
int i;
- if (!target_map)
- return 0;
-
device_lock_assert(&port->dev);
if (xa_empty(&port->dports))
- return -EINVAL;
+ return 0;
guard(rwsem_write)(&cxl_rwsem.region);
for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
- struct cxl_dport *dport = find_dport(port, target_map[i]);
+ struct cxl_dport *dport = find_dport(port, cxld->target_map[i]);
- if (!dport)
- return -ENXIO;
+ if (!dport) {
+ /* dport may be activated later */
+ continue;
+ }
cxlsd->target[i] = dport;
}
@@ -1911,9 +2081,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
/**
* cxl_decoder_add_locked - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* Certain types of decoders may not have any targets. The main example of this
* is an endpoint device. A more awkward example is a hostbridge whose root
@@ -1927,7 +2094,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
* Return: Negative error code if the decoder wasn't properly configured; else
* returns 0.
*/
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add_locked(struct cxl_decoder *cxld)
{
struct cxl_port *port;
struct device *dev;
@@ -1948,7 +2115,7 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (!is_endpoint_decoder(dev)) {
struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
- rc = decoder_populate_targets(cxlsd, port, target_map);
+ rc = decoder_populate_targets(cxlsd, port);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1967,9 +2134,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
/**
* cxl_decoder_add - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* This is the unlocked variant of cxl_decoder_add_locked().
* See cxl_decoder_add_locked().
@@ -1977,7 +2141,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
* Context: Process context. Takes and releases the device lock of the port that
* owns the @cxld.
*/
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add(struct cxl_decoder *cxld)
{
struct cxl_port *port;
@@ -1990,7 +2154,7 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
guard(device)(&port->dev);
- return cxl_decoder_add_locked(cxld, target_map);
+ return cxl_decoder_add_locked(cxld);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL");
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 7a0cead24490..e14c1d305b22 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -1516,8 +1516,10 @@ add_target:
cxl_rr->nr_targets_set);
return -ENXIO;
}
- } else
+ } else {
cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+ cxlsd->cxld.target_map[cxl_rr->nr_targets_set] = ep->dport->port_id;
+ }
inc = 1;
out_target_set:
cxl_rr->nr_targets_set += inc;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 4fe3df06f57a..231ddccf8977 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -357,6 +357,9 @@ enum cxl_decoder_type {
* @target_type: accelerator vs expander (type2 vs type3) selector
* @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
+ * @target_map: cached copy of hardware port-id list, available at init
+ * before all @dport objects have been instantiated. While a
+ * dport id is 8 bits, CFMWS interleave targets are 32 bits.
* @commit: device/decoder-type specific callback to commit settings to hw
* @reset: device/decoder-type specific callback to reset hw settings
*/
@@ -369,6 +372,7 @@ struct cxl_decoder {
enum cxl_decoder_type target_type;
struct cxl_region *region;
unsigned long flags;
+ u32 target_map[CXL_DECODER_MAX_INTERLEAVE];
int (*commit)(struct cxl_decoder *cxld);
void (*reset)(struct cxl_decoder *cxld);
};
@@ -603,6 +607,7 @@ struct cxl_dax_region {
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
+ * @component_reg_phys: Physical address of component register
*/
struct cxl_port {
struct device dev;
@@ -626,6 +631,7 @@ struct cxl_port {
} cdat;
bool cdat_available;
long pci_latency;
+ resource_size_t component_reg_phys;
};
/**
@@ -789,9 +795,9 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add(struct cxl_decoder *cxld);
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add_locked(struct cxl_decoder *cxld);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
static inline int cxl_root_decoder_autoremove(struct device *host,
struct cxl_root_decoder *cxlrd)
@@ -814,12 +820,10 @@ struct cxl_endpoint_dvsec_info {
struct range dvsec_range[2];
};
-struct cxl_hdm;
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port);
+
struct cxl_dev_state;
int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info);
@@ -898,7 +902,7 @@ static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
-void cxl_switch_parse_cdat(struct cxl_port *port);
+void cxl_switch_parse_cdat(struct cxl_dport *dport);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
@@ -913,6 +917,10 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c2);
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
@@ -923,4 +931,21 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#endif
u16 cxl_gpf_get_dvsec(struct device *dev);
+
+/*
+ * Declarations for functions that are mocked by cxl_test and called by
+ * cxl_core. The respective functions are defined as __foo() and called by
+ * cxl_core as foo(). The macros below ensure that those functions exist
+ * as foo(). See tools/testing/cxl/cxl_core_exports.c and
+ * tools/testing/cxl/exports.h for setting up the mock functions. The dance
+ * is done to avoid a circular dependency where cxl_core calls a function
+ * that ends up being a mock function and goes to cxl_test, where it calls
+ * a cxl_core function.
+ */
+#ifndef CXL_TEST_ENABLE
+#define DECLARE_TESTABLE(x) __##x
+#define devm_cxl_add_dport_by_dev DECLARE_TESTABLE(devm_cxl_add_dport_by_dev)
+#define devm_cxl_switch_port_decoders_setup DECLARE_TESTABLE(devm_cxl_switch_port_decoders_setup)
+#endif
+
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 54e219b0049e..7ae621e618e7 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -129,8 +129,6 @@ static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
-int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
void read_cdat_data(struct cxl_port *port);
void cxl_cor_error_detected(struct pci_dev *pdev);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index cf32dc50b7a6..51c8f2f84717 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -59,55 +59,20 @@ static int discover_region(struct device *dev, void *unused)
static int cxl_switch_port_probe(struct cxl_port *port)
{
- struct cxl_hdm *cxlhdm;
- int rc;
+ /* Reset nr_dports for rebind of driver */
+ port->nr_dports = 0;
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
- rc = devm_cxl_port_enumerate_dports(port);
- if (rc < 0)
- return rc;
-
- cxl_switch_parse_cdat(port);
-
- cxlhdm = devm_cxl_setup_hdm(port, NULL);
- if (!IS_ERR(cxlhdm))
- return devm_cxl_enumerate_decoders(cxlhdm, NULL);
-
- if (PTR_ERR(cxlhdm) != -ENODEV) {
- dev_err(&port->dev, "Failed to map HDM decoder capability\n");
- return PTR_ERR(cxlhdm);
- }
-
- if (rc == 1) {
- dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
- return devm_cxl_add_passthrough_decoder(port);
- }
-
- dev_err(&port->dev, "HDM decoder capability not found\n");
- return -ENXIO;
+ return 0;
}
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
- struct cxl_endpoint_dvsec_info info = { .port = port };
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_hdm *cxlhdm;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds, &info);
- if (rc < 0)
- return rc;
-
- cxlhdm = devm_cxl_setup_hdm(port, &info);
- if (IS_ERR(cxlhdm)) {
- if (PTR_ERR(cxlhdm) == -ENODEV)
- dev_err(&port->dev, "HDM decoder registers not found\n");
- return PTR_ERR(cxlhdm);
- }
-
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
cxl_endpoint_parse_cdat(port);
@@ -117,11 +82,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
if (rc)
return rc;
- rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
- if (rc)
- return rc;
-
- rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
+ rc = devm_cxl_endpoint_decoders_setup(port);
if (rc)
return rc;
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index d07f14cb7aa4..0d5ce4b74b9f 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -5,22 +5,19 @@ ldflags-y += --wrap=acpi_evaluate_integer
ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=nvdimm_bus_register
ldflags-y += --wrap=devm_cxl_port_enumerate_dports
-ldflags-y += --wrap=devm_cxl_setup_hdm
-ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
-ldflags-y += --wrap=devm_cxl_enumerate_decoders
ldflags-y += --wrap=cxl_await_media_ready
-ldflags-y += --wrap=cxl_hdm_decode_init
-ldflags-y += --wrap=cxl_dvsec_rr_decode
ldflags-y += --wrap=devm_cxl_add_rch_dport
ldflags-y += --wrap=cxl_rcd_component_reg_phys
ldflags-y += --wrap=cxl_endpoint_parse_cdat
ldflags-y += --wrap=cxl_dport_init_ras_reporting
+ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
CXL_CORE_SRC := $(DRIVERS)/cxl/core
ccflags-y := -I$(srctree)/drivers/cxl/
ccflags-y += -D__mock=__weak
+ccflags-y += -DCXL_TEST_ENABLE=1
ccflags-y += -DTRACE_INCLUDE_PATH=$(CXL_CORE_SRC) -I$(srctree)/drivers/cxl/core/
obj-m += cxl_acpi.o
diff --git a/tools/testing/cxl/cxl_core_exports.c b/tools/testing/cxl/cxl_core_exports.c
index f088792a8925..6754de35598d 100644
--- a/tools/testing/cxl/cxl_core_exports.c
+++ b/tools/testing/cxl/cxl_core_exports.c
@@ -2,6 +2,28 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include "cxl.h"
+#include "exports.h"
/* Exporting of cxl_core symbols that are only used by cxl_test */
EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, "CXL");
+
+cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev =
+ __devm_cxl_add_dport_by_dev;
+EXPORT_SYMBOL_NS_GPL(_devm_cxl_add_dport_by_dev, "CXL");
+
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ return _devm_cxl_add_dport_by_dev(port, dport_dev);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport_by_dev, "CXL");
+
+cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup =
+ __devm_cxl_switch_port_decoders_setup;
+EXPORT_SYMBOL_NS_GPL(_devm_cxl_switch_port_decoders_setup, "CXL");
+
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ return _devm_cxl_switch_port_decoders_setup(port);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_switch_port_decoders_setup, "CXL");
diff --git a/tools/testing/cxl/exports.h b/tools/testing/cxl/exports.h
new file mode 100644
index 000000000000..7ebee7c0bd67
--- /dev/null
+++ b/tools/testing/cxl/exports.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef __MOCK_CXL_EXPORTS_H_
+#define __MOCK_CXL_EXPORTS_H_
+
+typedef struct cxl_dport *(*cxl_add_dport_by_dev_fn)(struct cxl_port *port,
+ struct device *dport_dev);
+extern cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev;
+
+typedef int (*cxl_switch_decoders_setup_fn)(struct cxl_port *port);
+extern cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup;
+
+#endif
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index ba50338f8ada..2d135ca533d0 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -643,15 +643,8 @@ static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
return cxlhdm;
}
-static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
-{
- dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
- return -EOPNOTSUPP;
-}
-
-
struct target_map_ctx {
- int *target_map;
+ u32 *target_map;
int index;
int target_count;
};
@@ -818,15 +811,21 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
*/
if (WARN_ON(!dev))
continue;
+
cxlsd = to_cxl_switch_decoder(dev);
if (i == 0) {
/* put cxl_mem.4 second in the decode order */
- if (pdev->id == 4)
+ if (pdev->id == 4) {
cxlsd->target[1] = dport;
- else
+ cxld->target_map[1] = dport->port_id;
+ } else {
cxlsd->target[0] = dport;
- } else
+ cxld->target_map[0] = dport->port_id;
+ }
+ } else {
cxlsd->target[0] = dport;
+ cxld->target_map[0] = dport->port_id;
+ }
cxld = &cxlsd->cxld;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->flags = CXL_DECODER_F_ENABLE;
@@ -863,9 +862,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
target_count = NR_CXL_SWITCH_PORTS;
for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
- int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
struct target_map_ctx ctx = {
- .target_map = target_map,
.target_count = target_count,
};
struct cxl_decoder *cxld;
@@ -894,6 +891,8 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxld = &cxled->cxld;
}
+ ctx.target_map = cxld->target_map;
+
mock_init_hdm_decoder(cxld);
if (target_count) {
@@ -905,7 +904,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
}
}
- rc = cxl_decoder_add_locked(cxld, target_map);
+ rc = cxl_decoder_add_locked(cxld);
if (rc) {
put_device(&cxld->dev);
dev_err(&port->dev, "Failed to add decoder\n");
@@ -921,10 +920,42 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
return 0;
}
-static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
+static int __mock_cxl_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ cxlhdm = mock_cxl_setup_hdm(port, NULL);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) != -ENODEV)
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ return mock_cxl_enumerate_decoders(cxlhdm, NULL);
+}
+
+static int mock_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ return __mock_cxl_decoders_setup(port);
+}
+
+static int mock_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ return __mock_cxl_decoders_setup(port);
+}
+
+static int get_port_array(struct cxl_port *port,
+ struct platform_device ***port_array,
+ int *port_array_size)
{
struct platform_device **array;
- int i, array_size;
+ int array_size;
if (port->depth == 1) {
if (is_multi_bridge(port->uport_dev)) {
@@ -958,6 +989,22 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
return -ENXIO;
}
+ *port_array = array;
+ *port_array_size = array_size;
+
+ return 0;
+}
+
+static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
+{
+ struct platform_device **array;
+ int i, array_size;
+ int rc;
+
+ rc = get_port_array(port, &array, &array_size);
+ if (rc)
+ return rc;
+
for (i = 0; i < array_size; i++) {
struct platform_device *pdev = array[i];
struct cxl_dport *dport;
@@ -979,6 +1026,36 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
return 0;
}
+static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct platform_device **array;
+ int rc, i, array_size;
+
+ rc = get_port_array(port, &array, &array_size);
+ if (rc)
+ return ERR_PTR(rc);
+
+ for (i = 0; i < array_size; i++) {
+ struct platform_device *pdev = array[i];
+
+ if (pdev->dev.parent != port->uport_dev) {
+ dev_dbg(&port->dev, "%s: mismatch parent %s\n",
+ dev_name(port->uport_dev),
+ dev_name(pdev->dev.parent));
+ continue;
+ }
+
+ if (&pdev->dev != dport_dev)
+ continue;
+
+ return devm_cxl_add_dport(port, &pdev->dev, pdev->id,
+ CXL_RESOURCE_NONE);
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
/*
* Faking the cxl_dpa_perf for the memdev when appropriate.
*/
@@ -1035,11 +1112,11 @@ static struct cxl_mock_ops cxl_mock_ops = {
.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
.acpi_evaluate_integer = mock_acpi_evaluate_integer,
.acpi_pci_find_root = mock_acpi_pci_find_root,
+ .devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup,
+ .devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup,
.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
- .devm_cxl_setup_hdm = mock_cxl_setup_hdm,
- .devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
- .devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
+ .devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 1989ae020df3..995269a75cbd 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -10,12 +10,21 @@
#include <cxlmem.h>
#include <cxlpci.h>
#include "mock.h"
+#include "../exports.h"
static LIST_HEAD(mock);
+static struct cxl_dport *
+redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
+static int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+
void register_cxl_mock_ops(struct cxl_mock_ops *ops)
{
list_add_rcu(&ops->list, &mock);
+ _devm_cxl_add_dport_by_dev = redirect_devm_cxl_add_dport_by_dev;
+ _devm_cxl_switch_port_decoders_setup =
+ redirect_devm_cxl_switch_port_decoders_setup;
}
EXPORT_SYMBOL_GPL(register_cxl_mock_ops);
@@ -23,6 +32,9 @@ DEFINE_STATIC_SRCU(cxl_mock_srcu);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops)
{
+ _devm_cxl_switch_port_decoders_setup =
+ __devm_cxl_switch_port_decoders_setup;
+ _devm_cxl_add_dport_by_dev = __devm_cxl_add_dport_by_dev;
list_del_rcu(&ops->list);
synchronize_srcu(&cxl_mock_srcu);
}
@@ -131,55 +143,34 @@ __wrap_nvdimm_bus_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register);
-struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
-
-{
- int index;
- struct cxl_hdm *cxlhdm;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_port(port->uport_dev))
- cxlhdm = ops->devm_cxl_setup_hdm(port, info);
- else
- cxlhdm = devm_cxl_setup_hdm(port, info);
- put_cxl_mock_ops(index);
-
- return cxlhdm;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, "CXL");
-
-int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
{
int rc, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(port->uport_dev))
- rc = ops->devm_cxl_add_passthrough_decoder(port);
+ rc = ops->devm_cxl_switch_port_decoders_setup(port);
else
- rc = devm_cxl_add_passthrough_decoder(port);
+ rc = __devm_cxl_switch_port_decoders_setup(port);
put_cxl_mock_ops(index);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_passthrough_decoder, "CXL");
-int __wrap_devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
+int __wrap_devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
{
int rc, index;
- struct cxl_port *port = cxlhdm->port;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(port->uport_dev))
- rc = ops->devm_cxl_enumerate_decoders(cxlhdm, info);
+ rc = ops->devm_cxl_endpoint_decoders_setup(port);
else
- rc = devm_cxl_enumerate_decoders(cxlhdm, info);
+ rc = devm_cxl_endpoint_decoders_setup(port);
put_cxl_mock_ops(index);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enumerate_decoders, "CXL");
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_endpoint_decoders_setup, "CXL");
int __wrap_devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
@@ -211,39 +202,6 @@ int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, "CXL");
-int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
- struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
-{
- int rc = 0, index;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_dev(cxlds->dev))
- rc = 0;
- else
- rc = cxl_hdm_decode_init(cxlds, cxlhdm, info);
- put_cxl_mock_ops(index);
-
- return rc;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, "CXL");
-
-int __wrap_cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
- struct cxl_endpoint_dvsec_info *info)
-{
- int rc = 0, index;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_dev(cxlds->dev))
- rc = 0;
- else
- rc = cxl_dvsec_rr_decode(cxlds, info);
- put_cxl_mock_ops(index);
-
- return rc;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dvsec_rr_decode, "CXL");
-
struct cxl_dport *__wrap_devm_cxl_add_rch_dport(struct cxl_port *port,
struct device *dport_dev,
int port_id,
@@ -311,6 +269,22 @@ void __wrap_cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dport_init_ras_reporting, "CXL");
+struct cxl_dport *redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+ struct cxl_dport *dport;
+
+ if (ops && ops->is_mock_port(port->uport_dev))
+ dport = ops->devm_cxl_add_dport_by_dev(port, dport_dev);
+ else
+ dport = __devm_cxl_add_dport_by_dev(port, dport_dev);
+ put_cxl_mock_ops(index);
+
+ return dport;
+}
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: emulation module");
MODULE_IMPORT_NS("ACPI");
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index d1b0271d2822..4ed932e76aae 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -20,12 +20,11 @@ struct cxl_mock_ops {
bool (*is_mock_port)(struct device *dev);
bool (*is_mock_dev)(struct device *dev);
int (*devm_cxl_port_enumerate_dports)(struct cxl_port *port);
- struct cxl_hdm *(*devm_cxl_setup_hdm)(
- struct cxl_port *port, struct cxl_endpoint_dvsec_info *info);
- int (*devm_cxl_add_passthrough_decoder)(struct cxl_port *port);
- int (*devm_cxl_enumerate_decoders)(
- struct cxl_hdm *hdm, struct cxl_endpoint_dvsec_info *info);
+ int (*devm_cxl_switch_port_decoders_setup)(struct cxl_port *port);
+ int (*devm_cxl_endpoint_decoders_setup)(struct cxl_port *port);
void (*cxl_endpoint_parse_cdat)(struct cxl_port *port);
+ struct cxl_dport *(*devm_cxl_add_dport_by_dev)(struct cxl_port *port,
+ struct device *dport_dev);
};
void register_cxl_mock_ops(struct cxl_mock_ops *ops);