-rw-r--r--  Documentation/devicetree/bindings/cache/sifive,ccache0.yaml |   5
-rw-r--r--  MAINTAINERS                                                 |   3
-rw-r--r--  arch/arm64/Kconfig                                          |   2
-rw-r--r--  arch/x86/mm/pat/set_memory.c                                |   2
-rw-r--r--  drivers/cache/Kconfig                                       |  37
-rw-r--r--  drivers/cache/Makefile                                      |   2
-rw-r--r--  drivers/cache/hisi_soc_hha.c                                | 194
-rw-r--r--  drivers/cxl/core/region.c                                   |   5
-rw-r--r--  drivers/nvdimm/region.c                                     |   2
-rw-r--r--  drivers/nvdimm/region_devs.c                                |   2
-rw-r--r--  include/linux/cache_coherency.h                             |  61
-rw-r--r--  include/linux/memregion.h                                   |  16
-rw-r--r--  lib/Kconfig                                                 |   3
-rw-r--r--  lib/Makefile                                                |   2
-rw-r--r--  lib/cache_maint.c                                           | 138
15 files changed, 462 insertions, 12 deletions
diff --git a/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml b/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
index 579bacb66f34..c0e5ebb1fa4c 100644
--- a/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
+++ b/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
@@ -48,6 +48,11 @@ properties:
- const: microchip,mpfs-ccache
- const: sifive,fu540-c000-ccache
- const: cache
+ - items:
+ - const: microchip,pic64gx-ccache
+ - const: microchip,mpfs-ccache
+ - const: sifive,fu540-c000-ccache
+ - const: cache
cache-block-size:
const: 64
diff --git a/MAINTAINERS b/MAINTAINERS
index b10ef9480bfb..a6939cea1a2b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -24451,10 +24451,13 @@ F: drivers/staging/
STANDALONE CACHE CONTROLLER DRIVERS
M: Conor Dooley <conor@kernel.org>
+M: Jonathan Cameron <jonathan.cameron@huawei.com>
S: Maintained
T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
F: Documentation/devicetree/bindings/cache/
F: drivers/cache
+F: include/linux/cache_coherency.h
+F: lib/cache_maint.c
STARFIRE/DURALAN NETWORK DRIVER
M: Ion Badulescu <ionut@badula.org>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6663ffd23f25..f63288ea4b2b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,6 +21,7 @@ config ARM64
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CC_PLATFORM
+ select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -149,6 +150,7 @@ config ARM64
select GENERIC_ARCH_TOPOLOGY
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_CACHE_MAINTENANCE
select GENERIC_CPU_DEVICES
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 970981893c9b..6bb3eafa5563 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -368,7 +368,7 @@ bool cpu_cache_has_invalidate_memregion(void)
}
EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
-int cpu_cache_invalidate_memregion(int res_desc)
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
{
if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
return -ENXIO;
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index db51386c663a..1518449d47b5 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -1,9 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Cache Drivers"
+
+menuconfig CACHEMAINT_FOR_DMA
+ bool "Cache management for noncoherent DMA"
+ depends on RISCV
+ default y
+ help
+ These drivers implement support for noncoherent DMA master devices
+ on platforms that lack the standard CPU interfaces for this.
+
+if CACHEMAINT_FOR_DMA
config AX45MP_L2_CACHE
bool "Andes Technology AX45MP L2 Cache controller"
- depends on RISCV
select RISCV_NONSTANDARD_CACHE_OPS
help
Support for the L2 cache controller on Andes Technology AX45MP platforms.
@@ -16,7 +24,6 @@ config SIFIVE_CCACHE
config STARFIVE_STARLINK_CACHE
bool "StarFive StarLink Cache controller"
- depends on RISCV
depends on ARCH_STARFIVE
depends on 64BIT
select RISCV_DMA_NONCOHERENT
@@ -24,4 +31,26 @@ config STARFIVE_STARLINK_CACHE
help
Support for the StarLink cache controller IP from StarFive.
-endmenu
+endif #CACHEMAINT_FOR_DMA
+
+menuconfig CACHEMAINT_FOR_HOTPLUG
+ bool "Cache management for memory hot plug like operations"
+ depends on GENERIC_CPU_CACHE_MAINTENANCE
+ help
+ These drivers implement cache management for flows where it is necessary
+ to flush data from all host caches.
+
+if CACHEMAINT_FOR_HOTPLUG
+
+config HISI_SOC_HHA
+ tristate "HiSilicon Hydra Home Agent (HHA) device driver"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ help
+ The Hydra Home Agent (HHA) is responsible for cache coherency
+ on the SoC. This driver enables the cache maintenance functions of
+ the HHA.
+
+ This driver can be built as a module. If so, the module will be
+ called hisi_soc_hha.
+
+endif #CACHEMAINT_FOR_HOTPLUG
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
index 55c5e851034d..b3362b15d6c1 100644
--- a/drivers/cache/Makefile
+++ b/drivers/cache/Makefile
@@ -3,3 +3,5 @@
obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o
+
+obj-$(CONFIG_HISI_SOC_HHA) += hisi_soc_hha.o
diff --git a/drivers/cache/hisi_soc_hha.c b/drivers/cache/hisi_soc_hha.c
new file mode 100644
index 000000000000..25ff0f5ae79b
--- /dev/null
+++ b/drivers/cache/hisi_soc_hha.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Hydra Home Agent (HHA).
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ * Yushan Wang <wangyushan12@huawei.com>
+ *
+ * A system typically contains multiple HHAs. Each is responsible for a subset
+ * of the physical addresses in the system, but interleave can make it complex
+ * to map a particular cache line to the responsible HHA. As such, no filtering
+ * is done in the driver; the hardware is expected to respond with success even
+ * if it was not responsible for any addresses in the range on which the
+ * operation was requested.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cache_coherency.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define HISI_HHA_CTRL 0x5004
+#define HISI_HHA_CTRL_EN BIT(0)
+#define HISI_HHA_CTRL_RANGE BIT(1)
+#define HISI_HHA_CTRL_TYPE GENMASK(3, 2)
+#define HISI_HHA_START_L 0x5008
+#define HISI_HHA_START_H 0x500c
+#define HISI_HHA_LEN_L 0x5010
+#define HISI_HHA_LEN_H 0x5014
+
+/* Maintenance operations are performed at a 128-byte granularity */
+#define HISI_HHA_MAINT_ALIGN 128
+
+#define HISI_HHA_POLL_GAP_US 10
+#define HISI_HHA_POLL_TIMEOUT_US 50000
+
+struct hisi_soc_hha {
+ /* Must be first element */
+ struct cache_coherency_ops_inst cci;
+ /* Locks HHA instance to forbid overlapping access. */
+ struct mutex lock;
+ void __iomem *base;
+};
+
+static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
+{
+ u32 val;
+
+ return !readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, val,
+ !(val & HISI_HHA_CTRL_EN),
+ HISI_HHA_POLL_GAP_US,
+ HISI_HHA_POLL_TIMEOUT_US);
+}
+
+static int hisi_soc_hha_wbinv(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+ phys_addr_t top, addr = invp->addr;
+ size_t size = invp->size;
+ u32 reg;
+
+ if (!size)
+ return -EINVAL;
+
+ top = ALIGN(addr + size, HISI_HHA_MAINT_ALIGN);
+ addr = ALIGN_DOWN(addr, HISI_HHA_MAINT_ALIGN);
+ size = top - addr;
+
+ guard(mutex)(&soc_hha->lock);
+
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -EBUSY;
+
+ /*
+ * Hardware will search for addresses in the range [addr, addr + size - 1],
+ * last byte included, and perform maintenance in 128 byte granules
+ * on those cachelines which contain the addresses. If a given instance
+ * is either not responsible for a cacheline or that cacheline is not
+ * currently present then the search will fail, no operation will be
+ * necessary and the device will report success.
+ */
+ size -= 1;
+
+ writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L);
+ writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H);
+ writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L);
+ writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H);
+
+ reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */
+ reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN;
+ writel(reg, soc_hha->base + HISI_HHA_CTRL);
+
+ return 0;
+}
+
+static int hisi_soc_hha_done(struct cache_coherency_ops_inst *cci)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+
+ guard(mutex)(&soc_hha->lock);
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static const struct cache_coherency_ops hha_ops = {
+ .wbinv = hisi_soc_hha_wbinv,
+ .done = hisi_soc_hha_done,
+};
+
+static int hisi_soc_hha_probe(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha;
+ struct resource *mem;
+ int ret;
+
+ soc_hha = cache_coherency_ops_instance_alloc(&hha_ops,
+ struct hisi_soc_hha, cci);
+ if (!soc_hha)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, soc_hha);
+
+ mutex_init(&soc_hha->lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err_free_cci;
+ }
+
+ soc_hha->base = ioremap(mem->start, resource_size(mem));
+ if (!soc_hha->base) {
+ ret = dev_err_probe(&pdev->dev, -ENOMEM,
+ "failed to remap io memory");
+ goto err_free_cci;
+ }
+
+ ret = cache_coherency_ops_instance_register(&soc_hha->cci);
+ if (ret)
+ goto err_iounmap;
+
+ return 0;
+
+err_iounmap:
+ iounmap(soc_hha->base);
+err_free_cci:
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+ return ret;
+}
+
+static void hisi_soc_hha_remove(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev);
+
+ cache_coherency_ops_instance_unregister(&soc_hha->cci);
+ iounmap(soc_hha->base);
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+}
+
+static const struct acpi_device_id hisi_soc_hha_ids[] = {
+ { "HISI0511", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids);
+
+static struct platform_driver hisi_soc_hha_driver = {
+ .driver = {
+ .name = "hisi_soc_hha",
+ .acpi_match_table = hisi_soc_hha_ids,
+ },
+ .probe = hisi_soc_hha_probe,
+ .remove = hisi_soc_hha_remove,
+};
+
+module_platform_driver(hisi_soc_hha_driver);
+
+MODULE_IMPORT_NS("CACHE_COHERENCY");
+MODULE_DESCRIPTION("HiSilicon Hydra Home Agent driver supporting cache maintenance");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_AUTHOR("Yushan Wang <wangyushan12@huawei.com>");
+MODULE_LICENSE("GPL");
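
For reference, the range fixup in hisi_soc_hha_wbinv() can be sketched in
isolation as follows; this is a minimal user-space illustration, and
hha_fixup() plus the printf harness are hypothetical, not part of the driver:

  #include <inttypes.h>
  #include <stdio.h>

  #define GRANULE 128 /* HISI_HHA_MAINT_ALIGN */

  /* Round a request out to whole granules; hardware takes an inclusive end. */
  static void hha_fixup(uint64_t addr, uint64_t size)
  {
          uint64_t top = (addr + size + GRANULE - 1) & ~(uint64_t)(GRANULE - 1);
          uint64_t start = addr & ~(uint64_t)(GRANULE - 1);
          uint64_t len = top - start;

          /* The LEN registers are programmed with len - 1 (inclusive). */
          printf("[%#" PRIx64 ", +%#" PRIx64 ") -> start=%#" PRIx64
                 " len=%#" PRIx64 "\n", addr, size, start, len - 1);
  }

  int main(void)
  {
          /* Crosses a granule boundary: covers [0x1000, 0x1100), len 0xff. */
          hha_fixup(0x1070, 0x20);
          return 0;
  }
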
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 41b64d871c5a..ae107b619795 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -236,7 +236,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
return -ENXIO;
}
- cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+ if (!cxlr->params.res)
+ return -ENXIO;
+ cpu_cache_invalidate_memregion(cxlr->params.res->start,
+ resource_size(cxlr->params.res));
return 0;
}
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index cd9b52040d7b..53567f3ed427 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -110,7 +110,7 @@ static void nd_region_remove(struct device *dev)
* here is ok.
*/
if (cpu_cache_has_invalidate_memregion())
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+ cpu_cache_invalidate_all();
}
static int child_notify(struct device *dev, void *data)
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5ceaf5db595..1220530a23b6 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -90,7 +90,7 @@ static int nd_region_invalidate_memregion(struct nd_region *nd_region)
}
}
- cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+ cpu_cache_invalidate_all();
out:
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
diff --git a/include/linux/cache_coherency.h b/include/linux/cache_coherency.h
new file mode 100644
index 000000000000..cc81c5733e31
--- /dev/null
+++ b/include/linux/cache_coherency.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cache coherency maintenance operation device drivers
+ *
+ * Copyright Huawei 2025
+ */
+#ifndef _LINUX_CACHE_COHERENCY_H_
+#define _LINUX_CACHE_COHERENCY_H_
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+struct cc_inval_params {
+ phys_addr_t addr;
+ size_t size;
+};
+
+struct cache_coherency_ops_inst;
+
+struct cache_coherency_ops {
+ int (*wbinv)(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp);
+ int (*done)(struct cache_coherency_ops_inst *cci);
+};
+
+struct cache_coherency_ops_inst {
+ struct kref kref;
+ struct list_head node;
+ const struct cache_coherency_ops *ops;
+};
+
+int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci);
+void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci);
+
+struct cache_coherency_ops_inst *
+_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops,
+ size_t size);
+/**
+ * cache_coherency_ops_instance_alloc - Allocate cache coherency ops instance
+ * @ops: Cache maintenance operations
+ * @drv_struct: structure that contains the struct cache_coherency_ops_inst
+ * @member: Name of the struct cache_coherency_ops_inst member in @drv_struct.
+ *
+ * This allocates a driver specific structure and initializes the
+ * cache_coherency_ops_inst embedded in the drv_struct. Upon success the
+ * pointer must be freed via cache_coherency_ops_instance_put().
+ *
+ * Returns a &drv_struct * on success, %NULL on error.
+ */
+#define cache_coherency_ops_instance_alloc(ops, drv_struct, member) \
+ ({ \
+ static_assert(__same_type(struct cache_coherency_ops_inst, \
+ ((drv_struct *)NULL)->member)); \
+ static_assert(offsetof(drv_struct, member) == 0); \
+ (drv_struct *)_cache_coherency_ops_instance_alloc(ops, \
+ sizeof(drv_struct)); \
+ })
+void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci);
+
+#endif
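
To make the embedding rules concrete, a driver-side structure using this
header might be shaped as below; my_cache and my_wbinv are illustrative names
only, a sketch rather than anything in this patch:

  #include <linux/cache_coherency.h>
  #include <linux/container_of.h>
  #include <linux/io.h>
  #include <linux/kernel.h>

  struct my_cache {
          struct cache_coherency_ops_inst cci;    /* must be first */
          void __iomem *base;
  };

  static int my_wbinv(struct cache_coherency_ops_inst *cci,
                      struct cc_inval_params *invp)
  {
          struct my_cache *mc = container_of(cci, struct my_cache, cci);

          /* Device specific: write back + invalidate invp->addr/invp->size. */
          writel(lower_32_bits(invp->addr), mc->base);
          return 0;
  }

  static const struct cache_coherency_ops my_ops = {
          .wbinv = my_wbinv,      /* .done is optional */
  };

cache_coherency_ops_instance_alloc(&my_ops, struct my_cache, cci) then hands
back a fully initialized my_cache, and the two static_asserts reject a wrong
member type or a non-first placement at compile time.
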
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
index c01321467789..a55f62cc5266 100644
--- a/include/linux/memregion.h
+++ b/include/linux/memregion.h
@@ -26,8 +26,10 @@ static inline void memregion_free(int id)
/**
* cpu_cache_invalidate_memregion - drop any CPU cached data for
- * memregions described by @res_desc
- * @res_desc: one of the IORES_DESC_* types
+ * a memregion
+ * @start: start physical address of the target memory region.
+ * @len: length of the target memory region. -1 requests invalidation
+ * of all cached data regardless of region.
*
* Perform cache maintenance after a memory event / operation that
* changes the contents of physical memory in a cache-incoherent manner.
@@ -46,7 +48,7 @@ static inline void memregion_free(int id)
* the cache maintenance.
*/
#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
-int cpu_cache_invalidate_memregion(int res_desc);
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len);
bool cpu_cache_has_invalidate_memregion(void);
#else
static inline bool cpu_cache_has_invalidate_memregion(void)
@@ -54,10 +56,16 @@ static inline bool cpu_cache_has_invalidate_memregion(void)
return false;
}
-static inline int cpu_cache_invalidate_memregion(int res_desc)
+static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
{
WARN_ON_ONCE("CPU cache invalidation required");
return -ENXIO;
}
#endif
+
+static inline int cpu_cache_invalidate_all(void)
+{
+ return cpu_cache_invalidate_memregion(0, -1);
+}
+
#endif /* _MEMREGION_H_ */
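
Converted callers thus pass an explicit physical range instead of an
IORES_DESC_* type, mirroring the CXL hunk above; schematically, with res a
hypothetical struct resource pointer:

  /* Before: cpu_cache_invalidate_memregion(IORES_DESC_CXL); */
  ret = cpu_cache_invalidate_memregion(res->start, resource_size(res));

  /* When no tighter bound is known, flush everything: */
  ret = cpu_cache_invalidate_all();       /* wraps (0, -1) */
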
diff --git a/lib/Kconfig b/lib/Kconfig
index e629449dd2a3..2923924bea78 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -542,6 +542,9 @@ config MEMREGION
config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
bool
+config GENERIC_CPU_CACHE_MAINTENANCE
+ bool
+
config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
bool
diff --git a/lib/Makefile b/lib/Makefile
index 1ab2c4be3b66..aaf677cf4527 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -127,6 +127,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+obj-$(CONFIG_GENERIC_CPU_CACHE_MAINTENANCE) += cache_maint.o
+
lib-y += logic_pio.o
lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
diff --git a/lib/cache_maint.c b/lib/cache_maint.c
new file mode 100644
index 000000000000..9256a9ffc34c
--- /dev/null
+++ b/lib/cache_maint.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic support for Memory System Cache Maintenance operations.
+ *
+ * Coherency maintenance drivers register with this simple framework that will
+ * iterate over each registered instance to first kick off invalidation and
+ * then to wait until it is complete.
+ *
+ * If no implementations are registered yet, cpu_cache_has_invalidate_memregion()
+ * will return false. If this runs concurrently with unregistration then a
+ * race exists, but this is no worse than the case where the operations instance
+ * responsible for a given memory region has not yet registered.
+ */
+#include <linux/cache_coherency.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/export.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+
+static LIST_HEAD(cache_ops_instance_list);
+static DECLARE_RWSEM(cache_ops_instance_list_lock);
+
+static void __cache_coherency_ops_instance_free(struct kref *kref)
+{
+ struct cache_coherency_ops_inst *cci =
+ container_of(kref, struct cache_coherency_ops_inst, kref);
+ kfree(cci);
+}
+
+void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci)
+{
+ kref_put(&cci->kref, __cache_coherency_ops_instance_free);
+}
+EXPORT_SYMBOL_GPL(cache_coherency_ops_instance_put);
+
+static int cache_inval_one(struct cache_coherency_ops_inst *cci, void *data)
+{
+ if (!cci->ops)
+ return -EINVAL;
+
+ return cci->ops->wbinv(cci, data);
+}
+
+static int cache_inval_done_one(struct cache_coherency_ops_inst *cci)
+{
+ if (!cci->ops)
+ return -EINVAL;
+
+ if (!cci->ops->done)
+ return 0;
+
+ return cci->ops->done(cci);
+}
+
+static int cache_invalidate_memregion(phys_addr_t addr, size_t size)
+{
+ int ret;
+ struct cache_coherency_ops_inst *cci;
+ struct cc_inval_params params = {
+ .addr = addr,
+ .size = size,
+ };
+
+ guard(rwsem_read)(&cache_ops_instance_list_lock);
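+ /*
+ * Kick off the invalidation on every instance first, then wait for each
+ * to finish, so that the hardware units can operate in parallel.
+ */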
+ list_for_each_entry(cci, &cache_ops_instance_list, node) {
+ ret = cache_inval_one(cci, &params);
+ if (ret)
+ return ret;
+ }
+ list_for_each_entry(cci, &cache_ops_instance_list, node) {
+ ret = cache_inval_done_one(cci);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct cache_coherency_ops_inst *
+_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops,
+ size_t size)
+{
+ struct cache_coherency_ops_inst *cci;
+
+ if (!ops || !ops->wbinv)
+ return NULL;
+
+ cci = kzalloc(size, GFP_KERNEL);
+ if (!cci)
+ return NULL;
+
+ cci->ops = ops;
+ INIT_LIST_HEAD(&cci->node);
+ kref_init(&cci->kref);
+
+ return cci;
+}
+EXPORT_SYMBOL_NS_GPL(_cache_coherency_ops_instance_alloc, "CACHE_COHERENCY");
+
+int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci)
+{
+ guard(rwsem_write)(&cache_ops_instance_list_lock);
+ list_add(&cci->node, &cache_ops_instance_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cache_coherency_ops_instance_register, "CACHE_COHERENCY");
+
+void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci)
+{
+ guard(rwsem_write)(&cache_ops_instance_list_lock);
+ list_del(&cci->node);
+}
+EXPORT_SYMBOL_NS_GPL(cache_coherency_ops_instance_unregister, "CACHE_COHERENCY");
+
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
+{
+ return cache_invalidate_memregion(start, len);
+}
+EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM");
+
+/*
+ * Used for optimization / debug purposes only, as removal can race with use.
+ *
+ * Machines that do not support invalidation, e.g. VMs, will not have any
+ * operations instance to register and so this will always return false.
+ */
+bool cpu_cache_has_invalidate_memregion(void)
+{
+ guard(rwsem_read)(&cache_ops_instance_list_lock);
+ return !list_empty(&cache_ops_instance_list);
+}
+EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
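
Finally, for poking at the framework in isolation, a throwaway test module
could register a logging-only instance; this is entirely hypothetical and not
part of the patch:

  #include <linux/cache_coherency.h>
  #include <linux/module.h>
  #include <linux/printk.h>

  struct test_cc {
          struct cache_coherency_ops_inst cci;    /* must be first */
  };

  static struct test_cc *tcc;

  static int test_wbinv(struct cache_coherency_ops_inst *cci,
                        struct cc_inval_params *p)
  {
          pr_info("wbinv %pap + %zx\n", &p->addr, p->size);
          return 0;
  }

  static const struct cache_coherency_ops test_ops = {
          .wbinv = test_wbinv,
  };

  static int __init test_cc_init(void)
  {
          int ret;

          tcc = cache_coherency_ops_instance_alloc(&test_ops,
                                                   struct test_cc, cci);
          if (!tcc)
                  return -ENOMEM;

          ret = cache_coherency_ops_instance_register(&tcc->cci);
          if (ret)
                  cache_coherency_ops_instance_put(&tcc->cci);
          return ret;
  }
  module_init(test_cc_init);

  static void __exit test_cc_exit(void)
  {
          cache_coherency_ops_instance_unregister(&tcc->cci);
          cache_coherency_ops_instance_put(&tcc->cci);
  }
  module_exit(test_cc_exit);

  MODULE_IMPORT_NS("CACHE_COHERENCY");
  MODULE_DESCRIPTION("Hypothetical cache_maint test module");
  MODULE_LICENSE("GPL");

On a kernel using the generic implementation, loading this flips
cpu_cache_has_invalidate_memregion() to true and any subsequent
cpu_cache_invalidate_memregion() call logs the requested range.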