author	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-06 10:15:41 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2025-12-06 10:15:41 -0800
commit	249872f53d64441690927853e9d3af36394802d5 (patch)
tree	a2f4b88ba236fce2ac8ec23edc7d30658ea7c809 /drivers
parent	fbff94967958e46f7404b2dfbcf3b19e96aaaae2 (diff)
parent	7dfbe9a6751973c17138ddc0d33deff5f5f35b94 (diff)
Merge tag 'tsm-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/devsec/tsm
Pull PCIe Link Encryption and Device Authentication from Dan Williams:
 "New PCI infrastructure and one architecture implementation for PCIe
  link encryption establishment via platform firmware services.

  This work is the result of multiple vendors coming to consensus on
  some core infrastructure (thanks Alexey, Yilun, and Aneesh!), and
  three vendor implementations, although only one is included in this
  pull. The PCI core changes have an ack from Bjorn, the crypto/ccp/
  changes have an ack from Tom, and the iommu/amd/ changes have an ack
  from Joerg.

  PCIe link encryption is made possible by the soup of acronyms
  mentioned in the shortlog below. Link Integrity and Data Encryption
  (IDE) is a protocol for installing keys in the transmitter and
  receiver at each end of a link. That protocol is transported over
  Data Object Exchange (DOE) mailboxes using PCI configuration
  requests. The aspect that makes this a "platform firmware service"
  is that the key provisioning and protocol are coordinated through a
  Trusted Execution Environment (TEE) Security Manager (TSM). That is
  either firmware running in a coprocessor (AMD SEV-TIO), or
  quasi-hypervisor software (Intel TDX Connect / ARM CCA) running in a
  protected CPU mode.

  Now, the only reason to ask a TSM to run this protocol and install
  the keys, rather than have a Linux driver do the same, is so that a
  confidential VM can later ask the TSM directly "can you certify this
  device?". That precludes host Linux from provisioning its own keys,
  because host Linux is outside the trust domain for the VM. It also
  turns out that all architectures, save for one, do not publish a
  mechanism for an OS to establish keys in the root port. So
  "TSM-established link encryption" is the only cross-architecture
  path for this capability for the foreseeable future.

  This unblocks the other arch implementations to follow in
  v6.20/v7.0, once they clear some other dependencies, and it unblocks
  the next phase of work to implement the end-to-end flow of
  confidential device assignment. The PCIe specification calls this
  end-to-end flow the Trusted Execution Environment (TEE) Device
  Interface Security Protocol (TDISP).

  In the meantime, Linux gets a link encryption facility which has
  practical benefits along the same lines as memory encryption: it
  authenticates devices via certificates and may protect against
  interposer attacks trying to capture clear-text PCIe traffic.

  Summary:

   - Introduce the PCI/TSM core for the coordination of device
     authentication, link encryption establishment (IDE), and later,
     management of the device security operational states (TDISP).
     Notify the new TSM core layer of PCI device arrival and departure

   - Add a low-level TSM driver for the link encryption establishment
     capabilities of the AMD SEV-TIO architecture

   - Add a library of helpers for TSM drivers to use for IDE
     establishment and the DOE transport

   - Add skeleton support for 'bind' and 'guest_request' operations in
     support of TDISP"

* tag 'tsm-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/devsec/tsm: (23 commits)
  crypto/ccp: Fix CONFIG_PCI=n build
  virt: Fix Kconfig warning when selecting TSM without VIRT_DRIVERS
  crypto/ccp: Implement SEV-TIO PCIe IDE (phase1)
  iommu/amd: Report SEV-TIO support
  psp-sev: Assign numbers to all status codes and add new
  ccp: Make snp_reclaim_pages and __sev_do_cmd_locked public
  PCI/TSM: Add 'dsm' and 'bound' attributes for dependent functions
  PCI/TSM: Add pci_tsm_guest_req() for managing TDIs
  PCI/TSM: Add pci_tsm_bind() helper for instantiating TDIs
  PCI/IDE: Initialize an ID for all IDE streams
  PCI/IDE: Add Address Association Register setup for downstream MMIO
  resource: Introduce resource_assigned() for discerning active resources
  PCI/TSM: Drop stub for pci_tsm_doe_transfer()
  drivers/virt: Drop VIRT_DRIVERS build dependency
  PCI/TSM: Report active IDE streams
  PCI/IDE: Report available IDE streams
  PCI/IDE: Add IDE establishment helpers
  PCI: Establish document for PCI host bridge sysfs attributes
  PCI: Add PCIe Device 3 Extended Capability enumeration
  PCI/TSM: Establish Secure Sessions and Link Encryption
  ...
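
At a glance, a low-level driver plugs into the new PCI/TSM core by populating
a struct pci_tsm_ops and registering it; a minimal sketch, with hypothetical
my_*() callbacks, mirroring the sev_tsm_ops wiring in this diff:

  /*
   * Sketch of a low-level TSM driver hooking into the PCI/TSM core.
   * The my_*() callbacks are illustrative; sev-dev-tsm.c below is the
   * real example.
   */
  static struct pci_tsm_ops my_tsm_ops = {
  	.probe      = my_probe,      /* allocate per-device pci_tsm state */
  	.remove     = my_remove,     /* free it on device departure */
  	.connect    = my_connect,    /* authenticate device, set up IDE */
  	.disconnect = my_disconnect, /* tear down IDE streams */
  };

  struct tsm_dev *tsmdev = tsm_register(parent_dev, &my_tsm_ops);

  if (IS_ERR(tsmdev))
  	return PTR_ERR(tsmdev);
  /* ... and on teardown: */
  tsm_unregister(tsmdev);

Once registered, the TSM core notifies the driver of PCI device arrival and
departure via the probe/remove callbacks.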
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile                        2
-rw-r--r--  drivers/base/bus.c                     38
-rw-r--r--  drivers/crypto/ccp/Kconfig              1
-rw-r--r--  drivers/crypto/ccp/Makefile             4
-rw-r--r--  drivers/crypto/ccp/sev-dev-tio.c      864
-rw-r--r--  drivers/crypto/ccp/sev-dev-tio.h      123
-rw-r--r--  drivers/crypto/ccp/sev-dev-tsm.c      405
-rw-r--r--  drivers/crypto/ccp/sev-dev.c           66
-rw-r--r--  drivers/crypto/ccp/sev-dev.h           11
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h     1
-rw-r--r--  drivers/iommu/amd/init.c                9
-rw-r--r--  drivers/pci/Kconfig                    18
-rw-r--r--  drivers/pci/Makefile                    2
-rw-r--r--  drivers/pci/bus.c                      39
-rw-r--r--  drivers/pci/doe.c                       2
-rw-r--r--  drivers/pci/ide.c                     815
-rw-r--r--  drivers/pci/pci-sysfs.c                 4
-rw-r--r--  drivers/pci/pci.h                      21
-rw-r--r--  drivers/pci/probe.c                    31
-rw-r--r--  drivers/pci/remove.c                    7
-rw-r--r--  drivers/pci/search.c                   62
-rw-r--r--  drivers/pci/tsm.c                     900
-rw-r--r--  drivers/virt/Kconfig                    4
-rw-r--r--  drivers/virt/coco/Kconfig               5
-rw-r--r--  drivers/virt/coco/Makefile              1
-rw-r--r--  drivers/virt/coco/tsm-core.c          163
26 files changed, 3574 insertions(+), 24 deletions(-)
diff --git a/drivers/Makefile b/drivers/Makefile
index 20eb17596b89..b9f70e01f269 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -160,7 +160,7 @@ obj-$(CONFIG_RPMSG) += rpmsg/
obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
-obj-$(CONFIG_VIRT_DRIVERS) += virt/
+obj-y += virt/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 320e155c6be7..9eb7771706f0 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -334,6 +334,19 @@ static struct device *next_device(struct klist_iter *i)
return dev;
}
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ }
+ return dev;
+}
+
/**
* bus_for_each_dev - device iterator.
* @bus: bus type.
@@ -414,6 +427,31 @@ struct device *bus_find_device(const struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!sp)
+ return NULL;
+
+ klist_iter_init_node(&sp->klist_devices, &i,
+ (start ? &start->p->knode_bus : NULL));
+ while ((dev = prev_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
+ break;
+ }
+ }
+ klist_iter_exit(&i);
+ subsys_put(sp);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device_reverse);
+
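
For context, a hedged usage sketch of the new reverse lookup (the match
callback here is illustrative); as with bus_find_device(), a hit is returned
with a reference that the caller must drop:

  static int match_token(struct device *dev, const void *data)
  {
  	return dev->platform_data == data;	/* illustrative predicate */
  }

  /* Walk the bus klist backwards, i.e. newest devices first */
  struct device *dev = bus_find_device_reverse(&pci_bus_type, NULL,
  					      token, match_token);
  if (dev) {
  	/* ... use dev ... */
  	put_device(dev);	/* drop the reference taken on match */
  }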
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index f394e45e11ab..f16a0f611317 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -39,6 +39,7 @@ config CRYPTO_DEV_SP_PSP
bool "Platform Security Processor (PSP) device"
default y
depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
+ select PCI_TSM if PCI
help
Provide support for the AMD Platform Security Processor (PSP).
The PSP is a dedicated processor that provides support for key
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index a9626b30044a..0424e08561ef 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -16,6 +16,10 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
hsti.o \
sfs.o
+ifeq ($(CONFIG_PCI_TSM),y)
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o
+endif
+
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes.o \
diff --git a/drivers/crypto/ccp/sev-dev-tio.c b/drivers/crypto/ccp/sev-dev-tio.c
new file mode 100644
index 000000000000..9a98f98c20a7
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to PSP for CCP/SEV-TIO/SNP-VM
+
+#include <linux/pci.h>
+#include <linux/tsm.h>
+#include <linux/psp.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+#include <linux/pci-doe.h>
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+#include <asm/page.h>
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+#define to_tio_status(dev_data) \
+ (container_of((dev_data), struct tio_dsm, data)->sev->tio_status)
+
+#define SLA_PAGE_TYPE_DATA 0
+#define SLA_PAGE_TYPE_SCATTER 1
+#define SLA_PAGE_SIZE_4K 0
+#define SLA_PAGE_SIZE_2M 1
+#define SLA_SZ(s) ((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K)
+#define SLA_SCATTER_LEN(s) (SLA_SZ(s) / sizeof(struct sla_addr_t))
+#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
+#define SLA_NULL ((struct sla_addr_t) { 0 })
+#define IS_SLA_NULL(s) ((s).sla == SLA_NULL.sla)
+#define IS_SLA_EOL(s) ((s).sla == SLA_EOL.sla)
+
+static phys_addr_t sla_to_pa(struct sla_addr_t sla)
+{
+ u64 pfn = sla.pfn;
+ u64 pa = pfn << PAGE_SHIFT;
+
+ return pa;
+}
+
+static void *sla_to_va(struct sla_addr_t sla)
+{
+ void *va = __va(__sme_clr(sla_to_pa(sla)));
+
+ return va;
+}
+
+#define sla_to_pfn(sla) (__pa(sla_to_va(sla)) >> PAGE_SHIFT)
+#define sla_to_page(sla) virt_to_page(sla_to_va(sla))
+
+static struct sla_addr_t make_sla(struct page *pg, bool stp)
+{
+ u64 pa = __sme_set(page_to_phys(pg));
+ struct sla_addr_t ret = {
+ .pfn = pa >> PAGE_SHIFT,
+ .page_size = SLA_PAGE_SIZE_4K, /* Do not do SLA_PAGE_SIZE_2M ATM */
+ .page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA
+ };
+
+ return ret;
+}
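
For illustration, the encoding round-trip these helpers implement (values
hypothetical, assuming 4K pages):

  /*
   * page at PA 0x1234000  --make_sla()-->  { .pfn = 0x1234,
   *                                          .page_size = SLA_PAGE_SIZE_4K,
   *                                          .page_type = DATA or SCATTER }
   * sla_to_pa() shifts .pfn back up; sla_to_va() additionally strips the
   * SME C-bit via __sme_clr() before handing the address to __va().
   */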
+
+/* the BUFFER Structure */
+#define SLA_BUFFER_FLAG_ENCRYPTION BIT(0)
+
+/*
+ * struct sla_buffer_hdr - Scatter list address buffer header
+ *
+ * @capacity_sz: Total capacity of the buffer in bytes
+ * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B
+ * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted)
+ * @iv: Initialization vector used for encryption
+ * @authtag: Authentication tag for encrypted buffer
+ */
+struct sla_buffer_hdr {
+ u32 capacity_sz;
+ u32 payload_sz; /* The size of BUFFER_PAYLOAD in bytes. Must be multiple of 32B */
+ u32 flags;
+ u8 reserved1[4];
+ u8 iv[16]; /* IV used for the encryption of this buffer */
+ u8 authtag[16]; /* Authentication tag for this buffer */
+ u8 reserved2[16];
+} __packed;
+
+enum spdm_data_type_t {
+ DOBJ_DATA_TYPE_SPDM = 0x1,
+ DOBJ_DATA_TYPE_SECURE_SPDM = 0x2,
+};
+
+struct spdm_dobj_hdr_req {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+struct spdm_dobj_hdr_resp {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */
+struct spdm_dobj_hdr_cert;
+struct spdm_dobj_hdr_meas;
+struct spdm_dobj_hdr_report;
+
+/* Used in all SPDM-aware TIO commands */
+struct spdm_ctrl {
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+} __packed;
+
+static size_t sla_dobj_id_to_size(u8 id)
+{
+ size_t n;
+
+ BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10);
+ switch (id) {
+ case SPDM_DOBJ_ID_REQ:
+ n = sizeof(struct spdm_dobj_hdr_req);
+ break;
+ case SPDM_DOBJ_ID_RESP:
+ n = sizeof(struct spdm_dobj_hdr_resp);
+ break;
+ default:
+ WARN_ON(1);
+ n = 0;
+ break;
+ }
+
+ return n;
+}
+
+#define SPDM_DOBJ_HDR_SIZE(hdr) sla_dobj_id_to_size((hdr)->id)
+#define SPDM_DOBJ_DATA(hdr) ((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr))
+#define SPDM_DOBJ_LEN(hdr) ((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr))
+
+#define sla_to_dobj_resp_hdr(buf) ((struct spdm_dobj_hdr_resp *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP))
+#define sla_to_dobj_req_hdr(buf) ((struct spdm_dobj_hdr_req *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ))
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return NULL;
+
+ return (struct spdm_dobj_hdr *) &buf[1];
+}
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(!hdr))
+ return NULL;
+
+ if (hdr->id != check_dobjid) {
+ pr_err("! ERROR: expected %d, found %d\n", check_dobjid, hdr->id);
+ return NULL;
+ }
+
+ return hdr;
+}
+
+static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP))
+ return NULL;
+
+ if (!hdr)
+ return NULL;
+
+ return (u8 *) hdr + sla_dobj_id_to_size(dobjid);
+}
+
+/*
+ * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command
+ *
+ * @length: Length of this command buffer in bytes
+ * @status_paddr: System physical address of the TIO_STATUS structure
+ */
+struct sev_data_tio_status {
+ u32 length;
+ u8 reserved[4];
+ u64 status_paddr;
+} __packed;
+
+/* TIO_INIT */
+struct sev_data_tio_init {
+ u32 length;
+ u8 reserved[12];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_create - TIO_DEV_CREATE command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer
+ * @device_id: PCIe Routing Identifier of the device to connect to
+ * @root_port_id: PCIe Routing Identifier of the root port of the device
+ * @segment_id: PCIe Segment Identifier of the device to connect to
+ */
+struct sev_data_tio_dev_create {
+ u32 length;
+ u8 reserved1[4];
+ struct sla_addr_t dev_ctx_sla;
+ u16 device_id;
+ u16 root_port_id;
+ u8 segment_id;
+ u8 reserved2[11];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage.
+ * Setting the kth bit of the TC_MASK to 1 indicates that the traffic
+ * class k will be initialized
+ * @cert_slot: Slot number of the certificate requested for constructing the SPDM session
+ * @ide_stream_id: IDE stream IDs to be associated with this device.
+ * Valid only if corresponding bit in TC_MASK is set
+ */
+struct sev_data_tio_dev_connect {
+ u32 length;
+ u8 reserved1[4];
+ struct spdm_ctrl spdm_ctrl;
+ u8 reserved2[8];
+ struct sla_addr_t dev_ctx_sla;
+ u8 tc_mask;
+ u8 cert_slot;
+ u8 reserved3[6];
+ u8 ide_stream_id[8];
+ u8 reserved4[8];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+#define TIO_DEV_DISCONNECT_FLAG_FORCE BIT(0)
+
+struct sev_data_tio_dev_disconnect {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @meas_nonce: Nonce for measurement freshness verification
+ */
+#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM BIT(0)
+
+struct sev_data_tio_dev_meas {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+ u8 meas_nonce[32];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+struct sev_data_tio_dev_certs {
+ u32 length;
+ u8 reserved[4];
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ *
+ * This command reclaims resources associated with a device context.
+ */
+struct sev_data_tio_dev_reclaim {
+ u32 length;
+ u8 reserved[4];
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla)
+{
+ struct sla_buffer_hdr *buf;
+
+ BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40);
+ if (IS_SLA_NULL(sla))
+ return NULL;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K))
+ return NULL;
+
+ if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER))
+ return NULL;
+
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (WARN_ON_ONCE(!npages))
+ return NULL;
+
+ struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL);
+
+ if (!pp)
+ return NULL;
+
+ for (i = 0; i < npages; ++i)
+ pp[i] = sla_to_page(scatter[i]);
+
+ buf = vm_map_ram(pp, npages, 0);
+ kfree(pp);
+ } else {
+ struct page *pg = sla_to_page(sla);
+
+ buf = vm_map_ram(&pg, 1, 0);
+ }
+
+ return buf;
+}
+
+static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (!npages)
+ return;
+
+ vm_unmap_ram(buf, npages);
+ } else {
+ vm_unmap_ram(buf, 1);
+ }
+}
+
+static void dobj_response_init(struct sla_buffer_hdr *buf)
+{
+ struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf);
+
+ dobj->id = SPDM_DOBJ_ID_RESP;
+ dobj->version.major = 0x1;
+ dobj->version.minor = 0;
+ dobj->length = 0;
+ buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length;
+}
+
+static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state)
+{
+ unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ int ret = 0, i;
+
+ if (IS_SLA_NULL(sla))
+ return;
+
+ if (firmware_state) {
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ scatter = sla_to_va(sla);
+
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+
+ ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false);
+ if (ret)
+ break;
+ }
+ } else {
+ ret = snp_reclaim_pages(sla_to_pa(sla), 1, false);
+ }
+ }
+
+ if (WARN_ON(ret))
+ return;
+
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+ free_page((unsigned long)sla_to_va(scatter[i]));
+ }
+ }
+
+ free_page((unsigned long)sla_to_va(sla));
+}
+
+static struct sla_addr_t sla_alloc(size_t len, bool firmware_state)
+{
+ unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ struct sla_addr_t ret = SLA_NULL;
+ struct sla_buffer_hdr *buf;
+ struct page *pg;
+
+ if (npages == 0)
+ return ret;
+
+ if (WARN_ON_ONCE(npages > ((PAGE_SIZE / sizeof(struct sla_addr_t)) + 1)))
+ return ret;
+
+ BUILD_BUG_ON(PAGE_SIZE < SZ_4K);
+
+ if (npages > 1) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, true);
+ scatter = page_to_virt(pg);
+ for (i = 0; i < npages; ++i) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ goto no_reclaim_exit;
+
+ scatter[i] = make_sla(pg, false);
+ }
+ scatter[i] = SLA_EOL;
+ } else {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, false);
+ }
+
+ buf = sla_buffer_map(ret);
+ if (!buf)
+ goto no_reclaim_exit;
+
+ buf->capacity_sz = (npages << PAGE_SHIFT);
+ sla_buffer_unmap(ret, buf);
+
+ if (firmware_state) {
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (rmp_make_private(sla_to_pfn(scatter[i]), 0,
+ PG_LEVEL_4K, 0, true))
+ goto free_exit;
+ }
+ } else {
+ if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true))
+ goto no_reclaim_exit;
+ }
+ }
+
+ return ret;
+
+no_reclaim_exit:
+ firmware_state = false;
+free_exit:
+ sla_free(ret, len, firmware_state);
+ return SLA_NULL;
+}
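
Taken together, the intended lifecycle of one of these buffers, pieced from
the helpers above (a sketch with error handling elided; firmware_state=true
moves the pages to firmware ownership via rmp_make_private(), so they must be
reclaimed before being freed):

  struct sla_addr_t sla = sla_alloc(len, true);      /* pages -> firmware-owned */
  struct sla_buffer_hdr *buf = sla_buffer_map(sla);  /* vmap the data pages */

  /* ... point a TIO_* command at sla, let the PSP fill/consume buf ... */

  sla_buffer_unmap(sla, buf);
  sla_free(sla, len, true);   /* snp_reclaim_pages(), then free_page() */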
+
+/* Expands a buffer, only firmware owned buffers allowed for now */
+static int sla_expand(struct sla_addr_t *sla, size_t *len)
+{
+ struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf;
+ struct sla_addr_t oldsla = *sla, newsla;
+ size_t oldlen = *len, newlen;
+
+ if (!oldbuf)
+ return -EFAULT;
+
+ newlen = oldbuf->capacity_sz;
+ if (oldbuf->capacity_sz == oldlen) {
+ /* This buffer does not require expansion, must be another buffer */
+ sla_buffer_unmap(oldsla, oldbuf);
+ return 1;
+ }
+
+ pr_notice("Expanding BUFFER from %ld to %ld bytes\n", oldlen, newlen);
+
+ newsla = sla_alloc(newlen, true);
+ if (IS_SLA_NULL(newsla))
+ return -ENOMEM;
+
+ newbuf = sla_buffer_map(newsla);
+ if (!newbuf) {
+ sla_free(newsla, newlen, true);
+ return -EFAULT;
+ }
+
+ memcpy(newbuf, oldbuf, oldlen);
+
+ sla_buffer_unmap(newsla, newbuf);
+ sla_free(oldsla, oldlen, true);
+ *sla = newsla;
+ *len = newlen;
+
+ return 0;
+}
+
+static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret,
+ struct tsm_dsm_tio *dev_data)
+{
+ int rc;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+
+ if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST))
+ return -EIO;
+
+ if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) {
+ int rc1, rc2;
+
+ rc1 = sla_expand(&dev_data->output, &dev_data->output_len);
+ if (rc1 < 0)
+ return rc1;
+
+ rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len);
+ if (rc2 < 0)
+ return rc2;
+
+ if (!rc1 && !rc2)
+ /* Neither buffer requires expansion, this is wrong */
+ return -EFAULT;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+ }
+
+ if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) {
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ struct spdm_dobj_hdr_req *req_hdr;
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t resp_len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr));
+
+ if (!dev_data->cmd) {
+ if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data)))
+ return -EINVAL;
+ if (WARN_ON(data_len > sizeof(dev_data->cmd_data)))
+ return -EFAULT;
+ memcpy(dev_data->cmd_data, data, data_len);
+ memset(&dev_data->cmd_data[data_len], 0xFF,
+ sizeof(dev_data->cmd_data) - data_len);
+ dev_data->cmd = cmd;
+ }
+
+ req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf);
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ switch (req_hdr->data_type) {
+ case DOBJ_DATA_TYPE_SPDM:
+ rc = PCI_DOE_FEATURE_CMA;
+ break;
+ case DOBJ_DATA_TYPE_SECURE_SPDM:
+ rc = PCI_DOE_FEATURE_SSESSION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ resp_hdr->data_type = req_hdr->data_type;
+ dev_data->spdm.req_len = req_hdr->hdr.length -
+ sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ);
+ dev_data->spdm.rsp_len = resp_len;
+ } else if (dev_data && dev_data->cmd) {
+ /* For either error or success just stop the bouncing */
+ memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data));
+ dev_data->cmd = 0;
+ }
+
+ return rc;
+}
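
In short, the command flow implemented above (a descriptive sketch, not code
from the patch):

  /*
   * sev_do_cmd(TIO_*) -> firmware returns one of:
   *   SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST
   *       -> sla_expand() output and/or scratch, retry the command once
   *   SEV_RET_SPDM_REQUEST
   *       -> stash the command in dev_data->cmd and return a DOE feature
   *          (CMA or SSESSION); the caller bounces reqbuf/respbuf through
   *          pci_doe() and re-enters via sev_tio_continue()
   *   anything else
   *       -> done; clear any stashed command state
   */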
+
+int sev_tio_continue(struct tsm_dsm_tio *dev_data)
+{
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ int ret;
+
+ if (!dev_data || !dev_data->cmd)
+ return -EINVAL;
+
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ dev_data->spdm.rsp_len, 32);
+ dev_data->respbuf->payload_sz = resp_hdr->hdr.length;
+
+ ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0,
+ &dev_data->psp_ret, dev_data);
+ if (ret)
+ return ret;
+
+ if (dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data)
+{
+ ctrl->req = dev_data->req;
+ ctrl->resp = dev_data->resp;
+ ctrl->scratch = dev_data->scratch;
+ ctrl->output = dev_data->output;
+}
+
+static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ sizeof(struct sla_buffer_hdr));
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ sla_buffer_unmap(dev_data->resp, dev_data->respbuf);
+ sla_buffer_unmap(dev_data->req, dev_data->reqbuf);
+ spdm->rsp = NULL;
+ spdm->req = NULL;
+ sla_free(dev_data->req, len, true);
+ sla_free(dev_data->resp, len, false);
+ sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true);
+
+ dev_data->req.sla = 0;
+ dev_data->resp.sla = 0;
+ dev_data->scratch.sla = 0;
+ dev_data->respbuf = NULL;
+ dev_data->reqbuf = NULL;
+ sla_free(dev_data->output, tio_status->spdm_out_size_max, true);
+}
+
+static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct tsm_spdm *spdm = &dev_data->spdm;
+ int ret;
+
+ dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true);
+ dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false);
+ dev_data->scratch_len = tio_status->spdm_scratch_size_max;
+ dev_data->scratch = sla_alloc(dev_data->scratch_len, true);
+ dev_data->output_len = tio_status->spdm_out_size_max;
+ dev_data->output = sla_alloc(dev_data->output_len, true);
+
+ if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) ||
+ IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->dev_ctx)) {
+ ret = -ENOMEM;
+ goto free_spdm_exit;
+ }
+
+ dev_data->reqbuf = sla_buffer_map(dev_data->req);
+ dev_data->respbuf = sla_buffer_map(dev_data->resp);
+ if (!dev_data->reqbuf || !dev_data->respbuf) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ);
+ spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP);
+ if (!spdm->req || !spdm->rsp) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ dobj_response_init(dev_data->respbuf);
+
+ return 0;
+
+free_spdm_exit:
+ spdm_ctrl_free(dev_data);
+ return ret;
+}
+
+int sev_tio_init_locked(void *tio_status_page)
+{
+ struct sev_tio_status *tio_status = tio_status_page;
+ struct sev_data_tio_status data_status = {
+ .length = sizeof(data_status),
+ };
+ int ret, psp_ret;
+
+ data_status.status_paddr = __psp_pa(tio_status_page);
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) ||
+ tio_status->reserved)
+ return -EFAULT;
+
+ if (!tio_status->tio_en && !tio_status->tio_init_done)
+ return -ENOENT;
+
+ if (tio_status->tio_init_done)
+ return -EBUSY;
+
+ struct sev_data_tio_init ti = { .length = sizeof(ti) };
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret);
+ if (ret)
+ return ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id,
+ u16 root_port_id, u8 segment_id)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_create create = {
+ .length = sizeof(create),
+ .device_id = device_id,
+ .root_port_id = root_port_id,
+ .segment_id = segment_id,
+ };
+ void *data_pg;
+ int ret;
+
+ dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true);
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return -ENOMEM;
+
+ data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
+ if (!data_pg) {
+ ret = -ENOMEM;
+ goto free_ctx_exit;
+ }
+
+ create.dev_ctx_sla = dev_data->dev_ctx;
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret);
+ if (ret)
+ goto free_data_pg_exit;
+
+ dev_data->data_pg = data_pg;
+
+ return 0;
+
+free_data_pg_exit:
+ snp_free_firmware_page(data_pg);
+free_ctx_exit:
+ sla_free(create.dev_ctx_sla, tio_status->devctx_size, true);
+ return ret;
+}
+
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_reclaim r = {
+ .length = sizeof(r),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ };
+ int ret;
+
+ if (dev_data->data_pg) {
+ snp_free_firmware_page(dev_data->data_pg);
+ dev_data->data_pg = NULL;
+ }
+
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return 0;
+
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret);
+
+ sla_free(dev_data->dev_ctx, tio_status->devctx_size, true);
+ dev_data->dev_ctx = SLA_NULL;
+
+ spdm_ctrl_free(dev_data);
+
+ return ret;
+}
+
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot)
+{
+ struct sev_data_tio_dev_connect connect = {
+ .length = sizeof(connect),
+ .tc_mask = tc_mask,
+ .cert_slot = cert_slot,
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .ide_stream_id = {
+ ids[0], ids[1], ids[2], ids[3],
+ ids[4], ids[5], ids[6], ids[7]
+ },
+ };
+ int ret;
+
+ if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+ if (!(tc_mask & 1))
+ return -EINVAL;
+
+ ret = spdm_ctrl_alloc(dev_data);
+ if (ret)
+ return ret;
+
+ spdm_ctrl_init(&connect.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force)
+{
+ struct sev_data_tio_dev_disconnect dc = {
+ .length = sizeof(dc),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .flags = force ? TIO_DEV_DISCONNECT_FLAG_FORCE : 0,
+ };
+
+ if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+
+ spdm_ctrl_init(&dc.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_TIO_STATUS: return sizeof(struct sev_data_tio_status);
+ case SEV_CMD_TIO_INIT: return sizeof(struct sev_data_tio_init);
+ case SEV_CMD_TIO_DEV_CREATE: return sizeof(struct sev_data_tio_dev_create);
+ case SEV_CMD_TIO_DEV_RECLAIM: return sizeof(struct sev_data_tio_dev_reclaim);
+ case SEV_CMD_TIO_DEV_CONNECT: return sizeof(struct sev_data_tio_dev_connect);
+ case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect);
+ default: return 0;
+ }
+}
diff --git a/drivers/crypto/ccp/sev-dev-tio.h b/drivers/crypto/ccp/sev-dev-tio.h
new file mode 100644
index 000000000000..67512b3dbc53
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PSP_SEV_TIO_H__
+#define __PSP_SEV_TIO_H__
+
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+#include <linux/tsm.h>
+#include <uapi/linux/psp-sev.h>
+
+struct sla_addr_t {
+ union {
+ u64 sla;
+ struct {
+ u64 page_type :1,
+ page_size :1,
+ reserved1 :10,
+ pfn :40,
+ reserved2 :12;
+ };
+ };
+} __packed;
+
+#define SEV_TIO_MAX_COMMAND_LENGTH 128
+
+/* SPDM control structure for DOE */
+struct tsm_spdm {
+ unsigned long req_len;
+ void *req;
+ unsigned long rsp_len;
+ void *rsp;
+};
+
+/* Describes TIO device */
+struct tsm_dsm_tio {
+ u8 cert_slot;
+ struct sla_addr_t dev_ctx;
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+ size_t output_len;
+ size_t scratch_len;
+ struct tsm_spdm spdm;
+ struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */
+ struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */
+
+ int cmd;
+ int psp_ret;
+ u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH];
+ void *data_pg; /* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */
+
+#define TIO_IDE_MAX_TC 8
+ struct pci_ide *ide[TIO_IDE_MAX_TC];
+};
+
+/* Describes TSM structure for PF0 pointed by pci_dev->tsm */
+struct tio_dsm {
+ struct pci_tsm_pf0 tsm;
+ struct tsm_dsm_tio data;
+ struct sev_device *sev;
+};
+
+/* Data object IDs */
+#define SPDM_DOBJ_ID_NONE 0
+#define SPDM_DOBJ_ID_REQ 1
+#define SPDM_DOBJ_ID_RESP 2
+
+struct spdm_dobj_hdr {
+ u32 id; /* Data object type identifier */
+ u32 length; /* Length of the data object, INCLUDING THIS HEADER */
+ struct { /* Version of the data object structure */
+ u8 minor;
+ u8 major;
+ } version;
+} __packed;
+
+/**
+ * struct sev_tio_status - TIO_STATUS command's info_paddr buffer
+ *
+ * @length: Length of this structure in bytes
+ * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO
+ * @tio_init_done: Indicates TIO_INIT has been invoked
+ * @spdm_req_size_min: Minimum SPDM request buffer size in bytes
+ * @spdm_req_size_max: Maximum SPDM request buffer size in bytes
+ * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes
+ * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes
+ * @spdm_out_size_min: Minimum SPDM output buffer size in bytes
+ * @spdm_out_size_max: Maximum SPDM output buffer size in bytes
+ * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes
+ * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes
+ * @devctx_size: Size of a device context buffer in bytes
+ * @tdictx_size: Size of a TDI context buffer in bytes
+ * @tio_crypto_alg: TIO crypto algorithms supported
+ */
+struct sev_tio_status {
+ u32 length;
+ u32 tio_en :1,
+ tio_init_done :1,
+ reserved :30;
+ u32 spdm_req_size_min;
+ u32 spdm_req_size_max;
+ u32 spdm_scratch_size_min;
+ u32 spdm_scratch_size_max;
+ u32 spdm_out_size_min;
+ u32 spdm_out_size_max;
+ u32 spdm_rsp_size_min;
+ u32 spdm_rsp_size_max;
+ u32 devctx_size;
+ u32 tdictx_size;
+ u32 tio_crypto_alg;
+ u8 reserved2[12];
+} __packed;
+
+int sev_tio_init_locked(void *tio_status_page);
+int sev_tio_continue(struct tsm_dsm_tio *dev_data);
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id,
+ u8 segment_id);
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot);
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force);
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data);
+
+#endif /* __PSP_SEV_TIO_H__ */
diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
new file mode 100644
index 000000000000..ea29cd5d0ff9
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tsm.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to CCP/SEV-TIO for generic PCIe TDISP module
+
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/tsm.h>
+#include <linux/iommu.h>
+#include <linux/pci-doe.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+MODULE_IMPORT_NS("PCI_IDE");
+
+#define TIO_DEFAULT_NR_IDE_STREAMS 1
+
+static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
+module_param_named(ide_nr, nr_ide_streams, uint, 0644);
+MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");
+
+#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
+#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
+#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
+#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)
+
+#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))
+
+static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
+{
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ /* Check the main command handler response before entering the loop */
+ if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ if (ret <= 0)
+ return ret;
+
+ /* ret > 0 means "SPDM requested" */
+ while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
+ ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
+ spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
+ if (ret < 0)
+ break;
+
+ WARN_ON_ONCE(ret == 0); /* The response should never be empty */
+ spdm->rsp_len = ret;
+ ret = sev_tio_continue(dev_data);
+ }
+
+ return ret;
+}
+
+static int stream_enable(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+ int ret;
+
+ ret = pci_ide_stream_enable(rp, ide);
+ if (ret)
+ return ret;
+
+ ret = pci_ide_stream_enable(ide->pdev, ide);
+ if (ret)
+ pci_ide_stream_disable(rp, ide);
+
+ return ret;
+}
+
+static int streams_enable(struct pci_ide **ide)
+{
+ int ret = 0;
+
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = stream_enable(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void stream_disable(struct pci_ide *ide)
+{
+ pci_ide_stream_disable(ide->pdev, ide);
+ pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_disable(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ stream_disable(ide[i]);
+}
+
+static void stream_setup(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ ide->partner[PCI_IDE_EP].rid_start = 0;
+ ide->partner[PCI_IDE_EP].rid_end = 0xffff;
+ ide->partner[PCI_IDE_RP].rid_start = 0;
+ ide->partner[PCI_IDE_RP].rid_end = 0xffff;
+
+ ide->pdev->ide_cfg = 0;
+ ide->pdev->ide_tee_limit = 1;
+ rp->ide_cfg = 1;
+ rp->ide_tee_limit = 0;
+
+ pci_warn(ide->pdev, "Forcing CFG/TEE for %s", pci_name(rp));
+ pci_ide_stream_setup(ide->pdev, ide);
+ pci_ide_stream_setup(rp, ide);
+}
+
+static u8 streams_setup(struct pci_ide **ide, u8 *ids)
+{
+ bool def = false;
+ u8 tc_mask = 0;
+ int i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (!ide[i]) {
+ ids[i] = 0xFF;
+ continue;
+ }
+
+ tc_mask |= BIT(i);
+ ids[i] = ide[i]->stream_id;
+
+ if (!def) {
+ struct pci_ide_partner *settings;
+
+ settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
+ settings->default_stream = 1;
+ def = true;
+ }
+
+ stream_setup(ide[i]);
+ }
+
+ return tc_mask;
+}
+
+static int streams_register(struct pci_ide **ide)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = pci_ide_stream_register(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void streams_unregister(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ pci_ide_stream_unregister(ide[i]);
+}
+
+static void stream_teardown(struct pci_ide *ide)
+{
+ pci_ide_stream_teardown(ide->pdev, ide);
+ pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_teardown(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ stream_teardown(ide[i]);
+ pci_ide_stream_free(ide[i]);
+ ide[i] = NULL;
+ }
+ }
+}
+
+static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
+ unsigned int tc)
+{
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_ide *ide1;
+
+ if (ide[tc]) {
+ pci_err(pdev, "Stream for class=%d already registered", tc);
+ return -EBUSY;
+ }
+
+ /* FIXME: find a better way */
+ if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
+ pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams);
+ pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
+
+ ide1 = pci_ide_stream_alloc(pdev);
+ if (!ide1)
+ return -EFAULT;
+
+ /* Blindly assign streamid=0 to TC=0, and so on */
+ ide1->stream_id = tc;
+
+ ide[tc] = ide1;
+
+ return 0;
+}
+
+static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
+{
+ struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
+ int rc;
+
+ if (!dsm)
+ return NULL;
+
+ rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
+ if (rc)
+ return NULL;
+
+ pci_dbg(pdev, "TSM enabled\n");
+ dsm->sev = sev;
+ return &no_free_ptr(dsm)->tsm.base_tsm;
+}
+
+static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
+{
+ struct sev_device *sev = tsm_dev_to_sev(tsmdev);
+
+ if (is_pci_tsm_pf0(pdev))
+ return tio_pf0_probe(pdev, sev);
+ return NULL;
+}
+
+static void dsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev = tsm->pdev;
+
+ pci_dbg(pdev, "TSM disabled\n");
+
+ if (is_pci_tsm_pf0(pdev)) {
+ struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);
+
+ pci_tsm_pf0_destructor(&dsm->tsm);
+ kfree(dsm);
+ }
+}
+
+static int dsm_create(struct tio_dsm *dsm)
+{
+ struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
+ u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
+ struct pci_dev *rootport = pcie_find_root_port(pdev);
+ u16 device_id = pci_dev_id(pdev);
+ u16 root_port_id;
+ u32 lnkcap = 0;
+
+ if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENODEV;
+
+ root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+
+ return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
+}
+
+static int dsm_connect(struct pci_dev *pdev)
+{
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ u8 ids[TIO_IDE_MAX_TC];
+ u8 tc_mask;
+ int ret;
+
+ if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
+ pci_err(pdev, "CMA DOE MB must support SSESSION\n");
+ return -EFAULT;
+ }
+
+ ret = stream_alloc(pdev, dev_data->ide, 0);
+ if (ret)
+ return ret;
+
+ ret = dsm_create(dsm);
+ if (ret)
+ goto ide_free_exit;
+
+ tc_mask = streams_setup(dev_data->ide, ids);
+
+ ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret)
+ goto free_exit;
+
+ streams_enable(dev_data->ide);
+
+ ret = streams_register(dev_data->ide);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+
+free_exit:
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ide_free_exit:
+
+ streams_teardown(dev_data->ide);
+
+ return ret;
+}
+
+static void dsm_disconnect(struct pci_dev *pdev)
+{
+ bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ int ret;
+
+ ret = sev_tio_dev_disconnect(dev_data, force);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret && !force) {
+ ret = sev_tio_dev_disconnect(dev_data, true);
+ sev_tio_spdm_cmd(dsm, ret);
+ }
+
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ streams_unregister(dev_data->ide);
+ streams_teardown(dev_data->ide);
+}
+
+static struct pci_tsm_ops sev_tsm_ops = {
+ .probe = dsm_probe,
+ .remove = dsm_remove,
+ .connect = dsm_connect,
+ .disconnect = dsm_disconnect,
+};
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
+{
+ struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ struct tsm_dev *tsmdev;
+ int ret;
+
+ WARN_ON(sev->tio_status);
+
+ if (!t)
+ return;
+
+ ret = sev_tio_init_locked(tio_status_page);
+ if (ret) {
+ pr_warn("SEV-TIO STATUS failed with %d\n", ret);
+ goto error_exit;
+ }
+
+ tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
+ if (IS_ERR(tsmdev))
+ goto error_exit;
+
+ memcpy(t, tio_status_page, sizeof(*t));
+
+ pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
+ "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
+ t->tio_en, t->tio_init_done,
+ t->spdm_req_size_min, t->spdm_req_size_max,
+ t->spdm_rsp_size_min, t->spdm_rsp_size_max,
+ t->spdm_scratch_size_min, t->spdm_scratch_size_max,
+ t->spdm_out_size_min, t->spdm_out_size_max,
+ t->devctx_size, t->tdictx_size,
+ t->tio_crypto_alg);
+
+ sev->tsmdev = tsmdev;
+ sev->tio_status = t;
+
+ return;
+
+error_exit:
+ kfree(t);
+ pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
+ ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
+}
+
+void sev_tsm_uninit(struct sev_device *sev)
+{
+ if (sev->tsmdev)
+ tsm_unregister(sev->tsmdev);
+
+ sev->tsmdev = NULL;
+}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 52ba892444a8..956ea609d0cc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -75,6 +75,14 @@ static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
+#if IS_ENABLED(CONFIG_PCI_TSM)
+static bool sev_tio_enabled = true;
+module_param_named(tio, sev_tio_enabled, bool, 0444);
+MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
+#else
+static const bool sev_tio_enabled = false;
+#endif
+
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
@@ -251,7 +259,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
- default: return 0;
+ default: return sev_tio_cmd_buffer_len(cmd);
}
return 0;
@@ -380,13 +388,7 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
return sev_write_init_ex_file();
}
-/*
- * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
- * needs snp_reclaim_pages(), so a forward declaration is needed.
- */
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
-
-static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
{
int ret, err, i;
@@ -420,6 +422,7 @@ cleanup:
snp_leak_pages(__phys_to_pfn(paddr), npages - i);
return ret;
}
+EXPORT_SYMBOL_GPL(snp_reclaim_pages);
static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
{
@@ -850,7 +853,7 @@ static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
return 0;
}
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
struct psp_device *psp = psp_master;
@@ -1392,6 +1395,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
*
*/
if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+ bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
+
/*
* Firmware checks that the pages containing the ranges enumerated
* in the RANGES structure are either in the default page state or in the
@@ -1432,6 +1437,17 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
data.init_rmp = 1;
data.list_paddr_en = 1;
data.list_paddr = __psp_pa(snp_range_list);
+
+ data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
+
+ /*
+ * When psp_init_on_probe is disabled, the userspace calling
+ * SEV ioctl can inadvertently shut down SNP and SEV-TIO causing
+ * unexpected state loss.
+ */
+ if (data.tio_en && !psp_init_on_probe)
+ dev_warn(sev->dev, "SEV-TIO as incompatible with psp_init_on_probe=0\n");
+
cmd = SEV_CMD_SNP_INIT_EX;
} else {
cmd = SEV_CMD_SNP_INIT;
@@ -1469,7 +1485,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
snp_hv_fixed_pages_state_update(sev, HV_FIXED);
sev->snp_initialized = true;
- dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
+ data.tio_en ? "enabled" : "disabled");
dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
sev->api_minor, sev->build);
@@ -1477,6 +1494,23 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
atomic_notifier_chain_register(&panic_notifier_list,
&snp_panic_notifier);
+ if (data.tio_en) {
+ /*
+ * This executes with the sev_cmd_mutex held so down the stack
+ * snp_reclaim_pages(locked=false) might be needed (which is extremely
+ * unlikely) but will cause a deadlock.
+ * Instead of exporting __snp_alloc_firmware_pages(), allocate a page
+ * for this one call here.
+ */
+ void *tio_status = page_address(__snp_alloc_firmware_pages(
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true));
+
+ if (tio_status) {
+ sev_tsm_init_locked(sev, tio_status);
+ __snp_free_firmware_pages(virt_to_page(tio_status), 0, true);
+ }
+ }
+
sev_es_tmr_size = SNP_TMR_SIZE;
return 0;
@@ -2756,8 +2790,20 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
static void sev_firmware_shutdown(struct sev_device *sev)
{
+ /*
+ * Calling without sev_cmd_mutex held as TSM will likely try disconnecting
+ * IDE and this ends up calling sev_do_cmd() which locks sev_cmd_mutex.
+ */
+ if (sev->tio_status)
+ sev_tsm_uninit(sev);
+
mutex_lock(&sev_cmd_mutex);
+
__sev_firmware_shutdown(sev, false);
+
+ kfree(sev->tio_status);
+ sev->tio_status = NULL;
+
mutex_unlock(&sev_cmd_mutex);
}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index ac03bd0848f7..b1cd556bbbf6 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -34,6 +34,8 @@ struct sev_misc_dev {
struct miscdevice misc;
};
+struct sev_tio_status;
+
struct sev_device {
struct device *dev;
struct psp_device *psp;
@@ -61,15 +63,24 @@ struct sev_device {
struct sev_user_data_snp_status snp_plat_status;
struct snp_feature_info snp_feat_info_0;
+
+ struct tsm_dev *tsmdev;
+ struct sev_tio_status *tio_status;
};
int sev_dev_init(struct psp_device *psp);
void sev_dev_destroy(struct psp_device *psp);
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
void sev_pci_init(void);
void sev_pci_exit(void);
struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
void snp_free_hv_fixed_pages(struct page *page);
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page);
+void sev_tsm_uninit(struct sev_device *sev);
+int sev_tio_cmd_buffer_len(int cmd);
+
#endif /* __SEV_DEV_H */
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 78b1c44bd6b5..320733e7d8b4 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -107,6 +107,7 @@
/* Extended Feature 2 Bits */
+#define FEATURE_SEVSNPIO_SUP BIT_ULL(1)
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index fdd6328bca89..4b2953418977 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2261,6 +2261,9 @@ static void print_iommu_info(void)
if (check_feature(FEATURE_SNP))
pr_cont(" SNP");
+ if (check_feature2(FEATURE_SEVSNPIO_SUP))
+ pr_cont(" SEV-TIO");
+
pr_cont("\n");
}
@@ -4028,4 +4031,10 @@ int amd_iommu_snp_disable(void)
return 0;
}
EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
+
+bool amd_iommu_sev_tio_supported(void)
+{
+ return check_feature2(FEATURE_SEVSNPIO_SUP);
+}
+EXPORT_SYMBOL_GPL(amd_iommu_sev_tio_supported);
#endif
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index f94f5d384362..00b0210e1f1d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -122,6 +122,24 @@ config XEN_PCIDEV_FRONTEND
config PCI_ATS
bool
+config PCI_IDE
+ bool
+
+config PCI_TSM
+ bool "PCI TSM: Device security protocol support"
+ select PCI_IDE
+ select PCI_DOE
+ select TSM
+ help
+ The TEE (Trusted Execution Environment) Device Interface
+ Security Protocol (TDISP) defines a "TSM" as a platform agent
+ that manages device authentication, link encryption, link
+ integrity protection, and assignment of PCI device functions
+ (virtual or physical) to confidential computing VMs that can
+ access (DMA) guest private memory.
+
+ Enable a platform TSM driver to use this capability.
+
config PCI_DOE
bool "Enable PCI Data Object Exchange (DOE) support"
help
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index f3c81c892786..e10cfe5a280b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
obj-$(CONFIG_PCI_DOE) += doe.o
+obj-$(CONFIG_PCI_IDE) += ide.o
+obj-$(CONFIG_PCI_TSM) += tsm.o
obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
obj-$(CONFIG_PCI_NPEM) += npem.o
obj-$(CONFIG_PCIE_TPH) += tph.o
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 9daf13ed3714..4383a36fd6ca 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/cleanup.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -435,6 +436,27 @@ static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void
return ret;
}
+static int __pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata)
+{
+ struct pci_dev *dev;
+ int ret = 0;
+
+ list_for_each_entry_reverse(dev, &top->devices, bus_list) {
+ if (dev->subordinate) {
+ ret = __pci_walk_bus_reverse(dev->subordinate, cb,
+ userdata);
+ if (ret)
+ break;
+ }
+ ret = cb(dev, userdata);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
/**
* pci_walk_bus - walk devices on/under bus, calling callback.
* @top: bus whose devices should be walked
@@ -456,6 +478,23 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
+/**
+ * pci_walk_bus_reverse - walk devices on/under bus, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Same semantics as pci_walk_bus(), but walks the bus in reverse order.
+ */
+void pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ down_read(&pci_bus_sem);
+ __pci_walk_bus_reverse(top, cb, userdata);
+ up_read(&pci_bus_sem);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_reverse);
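
A brief illustrative caller (the callback name is hypothetical); because the
walk recurses into subordinate buses before invoking the callback on the
bridge itself, children are visited before their parents, which suits
teardown ordering:

  static int quiesce_cb(struct pci_dev *pdev, void *userdata)
  {
  	/* child functions are seen before the bridge above them */
  	return 0;	/* returning non-zero stops the walk */
  }

  pci_walk_bus_reverse(bus, quiesce_cb, NULL);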
+
void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
lockdep_assert_held(&pci_bus_sem);
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index aae9a8a00406..62be9c8dbc52 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -24,8 +24,6 @@
#include "pci.h"
-#define PCI_DOE_FEATURE_DISCOVERY 0
-
/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
diff --git a/drivers/pci/ide.c b/drivers/pci/ide.c
new file mode 100644
index 000000000000..f0ef474e1a0d
--- /dev/null
+++ b/drivers/pci/ide.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+/* PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE) */
+
+#define dev_fmt(fmt) "PCI/IDE: " fmt
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci-ide.h>
+#include <linux/pci_regs.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/tsm.h>
+
+#include "pci.h"
+
+static int __sel_ide_offset(u16 ide_cap, u8 nr_link_ide, u8 stream_index,
+ u8 nr_ide_mem)
+{
+ u32 offset = ide_cap + PCI_IDE_LINK_STREAM_0 +
+ nr_link_ide * PCI_IDE_LINK_BLOCK_SIZE;
+
+ /*
+ * Assume a constant number of address association resources per stream
+ * index
+ */
+ return offset + stream_index * PCI_IDE_SEL_BLOCK_SIZE(nr_ide_mem);
+}
+
+static int sel_ide_offset(struct pci_dev *pdev,
+ struct pci_ide_partner *settings)
+{
+ return __sel_ide_offset(pdev->ide_cap, pdev->nr_link_ide,
+ settings->stream_index, pdev->nr_ide_mem);
+}
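
A worked example of the offset arithmetic, using hypothetical values: with
the IDE capability at 0x400, one Link IDE register block, three address
association blocks per stream, and stream_index 2:

  /*
   * offset = 0x400 + PCI_IDE_LINK_STREAM_0
   *        + 1 * PCI_IDE_LINK_BLOCK_SIZE       skip the Link IDE block
   *        + 2 * PCI_IDE_SEL_BLOCK_SIZE(3)     skip Selective Streams 0 and 1
   */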
+
+static bool reserve_stream_index(struct pci_dev *pdev, u8 idx)
+{
+ int ret;
+
+ ret = ida_alloc_range(&pdev->ide_stream_ida, idx, idx, GFP_KERNEL);
+ return ret >= 0;
+}
+
+static bool reserve_stream_id(struct pci_host_bridge *hb, u8 id)
+{
+ int ret;
+
+ ret = ida_alloc_range(&hb->ide_stream_ids_ida, id, id, GFP_KERNEL);
+ return ret >= 0;
+}
+
+static bool claim_stream(struct pci_host_bridge *hb, u8 stream_id,
+ struct pci_dev *pdev, u8 stream_idx)
+{
+ dev_info(&hb->dev, "Stream ID %d active at init\n", stream_id);
+ if (!reserve_stream_id(hb, stream_id)) {
+ dev_info(&hb->dev, "Failed to claim %s Stream ID %d\n",
+ stream_id == PCI_IDE_RESERVED_STREAM_ID ? "reserved" :
+ "active",
+ stream_id);
+ return false;
+ }
+
+ /* No stream index to reserve in the Link IDE case */
+ if (!pdev)
+ return true;
+
+ if (!reserve_stream_index(pdev, stream_idx)) {
+ pci_info(pdev, "Failed to claim active Selective Stream %d\n",
+ stream_idx);
+ return false;
+ }
+
+ return true;
+}
+
+void pci_ide_init(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+ u16 nr_link_ide, nr_ide_mem, nr_streams;
+ u16 ide_cap;
+ u32 val;
+
+ /*
+ * Unconditionally init so that ida idle state is consistent with
+ * pdev->ide_cap.
+ */
+ ida_init(&pdev->ide_stream_ida);
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ ide_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_IDE);
+ if (!ide_cap)
+ return;
+
+ pci_read_config_dword(pdev, ide_cap + PCI_IDE_CAP, &val);
+ if ((val & PCI_IDE_CAP_SELECTIVE) == 0)
+ return;
+
+ /*
+ * Require endpoint IDE capability to be paired with Root Port IDE
+ * capability.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) {
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+
+ if (!rp->ide_cap)
+ return;
+ }
+
+ pdev->ide_cfg = FIELD_GET(PCI_IDE_CAP_SEL_CFG, val);
+ pdev->ide_tee_limit = FIELD_GET(PCI_IDE_CAP_TEE_LIMITED, val);
+
+ if (val & PCI_IDE_CAP_LINK)
+ nr_link_ide = 1 + FIELD_GET(PCI_IDE_CAP_LINK_TC_NUM, val);
+ else
+ nr_link_ide = 0;
+
+ nr_ide_mem = 0;
+ nr_streams = 1 + FIELD_GET(PCI_IDE_CAP_SEL_NUM, val);
+ for (u16 i = 0; i < nr_streams; i++) {
+ int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
+ int nr_assoc;
+ u32 val;
+ u8 id;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val);
+
+ /*
+ * Let's not entertain streams that do not have a constant
+ * number of address association blocks
+ */
+ nr_assoc = FIELD_GET(PCI_IDE_SEL_CAP_ASSOC_NUM, val);
+ if (i && (nr_assoc != nr_ide_mem)) {
+ pci_info(pdev, "Unsupported Selective Stream %d capability, SKIP the rest\n", i);
+ nr_streams = i;
+ break;
+ }
+
+ nr_ide_mem = nr_assoc;
+
+ /*
+ * Claim Stream IDs and Selective Stream blocks that are already
+ * active on the device
+ */
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
+ id = FIELD_GET(PCI_IDE_SEL_CTL_ID, val);
+ if ((val & PCI_IDE_SEL_CTL_EN) &&
+ !claim_stream(hb, id, pdev, i))
+ return;
+ }
+
+ /* Reserve link stream-ids that are already active on the device */
+ for (u16 i = 0; i < nr_link_ide; ++i) {
+ int pos = ide_cap + PCI_IDE_LINK_STREAM_0 + i * PCI_IDE_LINK_BLOCK_SIZE;
+ u8 id;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, &val);
+ id = FIELD_GET(PCI_IDE_LINK_CTL_ID, val);
+ if ((val & PCI_IDE_LINK_CTL_EN) &&
+ !claim_stream(hb, id, NULL, -1))
+ return;
+ }
+
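+ /* Initialize unclaimed Selective Streams to the reserved Stream ID */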
+ for (u16 i = 0; i < nr_streams; i++) {
+ int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
+ if (val & PCI_IDE_SEL_CTL_EN)
+ continue;
+ val &= ~PCI_IDE_SEL_CTL_ID;
+ val |= FIELD_PREP(PCI_IDE_SEL_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
+ }
+
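+ /* Likewise, initialize unclaimed Link IDE streams to the reserved ID */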
+ for (u16 i = 0; i < nr_link_ide; ++i) {
+ int pos = ide_cap + PCI_IDE_LINK_STREAM_0 +
+ i * PCI_IDE_LINK_BLOCK_SIZE;
+
+ pci_read_config_dword(pdev, pos, &val);
+ if (val & PCI_IDE_LINK_CTL_EN)
+ continue;
+ val &= ~PCI_IDE_LINK_CTL_ID;
+ val |= FIELD_PREP(PCI_IDE_LINK_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
+ pci_write_config_dword(pdev, pos, val);
+ }
+
+ pdev->ide_cap = ide_cap;
+ pdev->nr_link_ide = nr_link_ide;
+ pdev->nr_sel_ide = nr_streams;
+ pdev->nr_ide_mem = nr_ide_mem;
+}
+
+struct stream_index {
+ struct ida *ida;
+ u8 stream_index;
+};
+
+static void free_stream_index(struct stream_index *stream)
+{
+ ida_free(stream->ida, stream->stream_index);
+}
+
+DEFINE_FREE(free_stream, struct stream_index *, if (_T) free_stream_index(_T))
+static struct stream_index *alloc_stream_index(struct ida *ida, u16 max,
+ struct stream_index *stream)
+{
+ int id;
+
+ if (!max)
+ return NULL;
+
+ id = ida_alloc_max(ida, max - 1, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
+ *stream = (struct stream_index) {
+ .ida = ida,
+ .stream_index = id,
+ };
+ return stream;
+}
+
+/**
+ * pci_ide_stream_alloc() - Reserve stream indices and probe for settings
+ * @pdev: IDE capable PCIe Endpoint Physical Function
+ *
+ * Retrieve the Requester ID range of @pdev for programming its Root
+ * Port IDE RID Association registers, and conversely retrieve the
+ * Requester ID of the Root Port for programming @pdev's IDE RID
+ * Association registers.
+ *
+ * Allocate a Selective IDE Stream Register Block instance per port.
+ *
+ * Allocate a platform stream resource from the associated host bridge.
+ * Retrieve stream association parameters for Requester ID range and
+ * address range restrictions for the stream.
+ */
+struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev)
+{
+ /* EP, RP, and HB stream index allocation */
+ struct stream_index __stream[PCI_IDE_HB + 1];
+ struct pci_bus_region pref_assoc = { 0, -1 };
+ struct pci_bus_region mem_assoc = { 0, -1 };
+ struct resource *mem, *pref;
+ struct pci_host_bridge *hb;
+ struct pci_dev *rp, *br;
+ int num_vf, rid_end;
+
+ if (!pci_is_pcie(pdev))
+ return NULL;
+
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT)
+ return NULL;
+
+ if (!pdev->ide_cap)
+ return NULL;
+
+ struct pci_ide *ide __free(kfree) = kzalloc(sizeof(*ide), GFP_KERNEL);
+ if (!ide)
+ return NULL;
+
+ hb = pci_find_host_bridge(pdev->bus);
+ struct stream_index *hb_stream __free(free_stream) = alloc_stream_index(
+ &hb->ide_stream_ida, hb->nr_ide_streams, &__stream[PCI_IDE_HB]);
+ if (!hb_stream)
+ return NULL;
+
+ rp = pcie_find_root_port(pdev);
+ struct stream_index *rp_stream __free(free_stream) = alloc_stream_index(
+ &rp->ide_stream_ida, rp->nr_sel_ide, &__stream[PCI_IDE_RP]);
+ if (!rp_stream)
+ return NULL;
+
+ struct stream_index *ep_stream __free(free_stream) = alloc_stream_index(
+ &pdev->ide_stream_ida, pdev->nr_sel_ide, &__stream[PCI_IDE_EP]);
+ if (!ep_stream)
+ return NULL;
+
+ /* for SR-IOV case, cover all VFs */
+ num_vf = pci_num_vf(pdev);
+ if (num_vf)
+ rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf),
+ pci_iov_virtfn_devfn(pdev, num_vf));
+ else
+ rid_end = pci_dev_id(pdev);
+
+ br = pci_upstream_bridge(pdev);
+ if (!br)
+ return NULL;
+
+ /*
+ * Check if the device consumes memory and/or prefetch-memory. Setup
+ * downstream address association ranges for each.
+ */
+ mem = pci_resource_n(br, PCI_BRIDGE_MEM_WINDOW);
+ pref = pci_resource_n(br, PCI_BRIDGE_PREF_MEM_WINDOW);
+ if (resource_assigned(mem))
+ pcibios_resource_to_bus(br->bus, &mem_assoc, mem);
+ if (resource_assigned(pref))
+ pcibios_resource_to_bus(br->bus, &pref_assoc, pref);
+
+ *ide = (struct pci_ide) {
+ .pdev = pdev,
+ .partner = {
+ [PCI_IDE_EP] = {
+ .rid_start = pci_dev_id(rp),
+ .rid_end = pci_dev_id(rp),
+ .stream_index = no_free_ptr(ep_stream)->stream_index,
+ /* Disable upstream address association */
+ .mem_assoc = { 0, -1 },
+ .pref_assoc = { 0, -1 },
+ },
+ [PCI_IDE_RP] = {
+ .rid_start = pci_dev_id(pdev),
+ .rid_end = rid_end,
+ .stream_index = no_free_ptr(rp_stream)->stream_index,
+ .mem_assoc = mem_assoc,
+ .pref_assoc = pref_assoc,
+ },
+ },
+ .host_bridge_stream = no_free_ptr(hb_stream)->stream_index,
+ .stream_id = -1,
+ };
+
+ return_ptr(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_alloc);
+
+/**
+ * pci_ide_stream_free() - unwind pci_ide_stream_alloc()
+ * @ide: idle IDE settings descriptor
+ *
+ * Free all of the stream index (register block) allocations acquired by
+ * pci_ide_stream_alloc(). The stream represented by @ide is assumed to
+ * be unregistered and not instantiated in any device.
+ */
+void pci_ide_stream_free(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+
+ ida_free(&pdev->ide_stream_ida, ide->partner[PCI_IDE_EP].stream_index);
+ ida_free(&rp->ide_stream_ida, ide->partner[PCI_IDE_RP].stream_index);
+ ida_free(&hb->ide_stream_ida, ide->host_bridge_stream);
+ kfree(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_free);
+
+/**
+ * pci_ide_stream_release() - unwind and release an @ide context
+ * @ide: partially or fully registered IDE settings descriptor
+ *
+ * In support of automatic cleanup of IDE setup routines, perform IDE
+ * teardown in the expected reverse order of setup, honoring which
+ * aspects of IDE setup successfully completed.
+ *
+ * Be careful that setup order mirrors this shutdown order. Otherwise,
+ * open code releasing the IDE context.
+ */
+void pci_ide_stream_release(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+
+ if (ide->partner[PCI_IDE_RP].enable)
+ pci_ide_stream_disable(rp, ide);
+
+ if (ide->partner[PCI_IDE_EP].enable)
+ pci_ide_stream_disable(pdev, ide);
+
+ if (ide->tsm_dev)
+ tsm_ide_stream_unregister(ide);
+
+ if (ide->partner[PCI_IDE_RP].setup)
+ pci_ide_stream_teardown(rp, ide);
+
+ if (ide->partner[PCI_IDE_EP].setup)
+ pci_ide_stream_teardown(pdev, ide);
+
+ if (ide->name)
+ pci_ide_stream_unregister(ide);
+
+ pci_ide_stream_free(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_release);
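+
+/*
+ * Example (illustrative sketch, not part of this patch): the setup flow
+ * that pci_ide_stream_release() unwinds, as a low-level TSM driver might
+ * implement it. The Stream ID choice and key programming are TSM
+ * specific and elided; "my_stream_id" is hypothetical:
+ *
+ *	struct pci_dev *rp = pcie_find_root_port(pdev);
+ *	struct pci_ide *ide = pci_ide_stream_alloc(pdev);
+ *
+ *	if (!ide)
+ *		return -ENOMEM;
+ *	ide->stream_id = my_stream_id;
+ *	if (pci_ide_stream_register(ide)) {
+ *		pci_ide_stream_free(ide);
+ *		return -ENXIO;
+ *	}
+ *	pci_ide_stream_setup(pdev, ide);
+ *	pci_ide_stream_setup(rp, ide);
+ *	if (tsm_ide_stream_register(ide))
+ *		goto release;
+ *	// ...program keys via TSM-specific means...
+ *	if (pci_ide_stream_enable(pdev, ide) || pci_ide_stream_enable(rp, ide))
+ *		goto release;
+ *	return 0;
+ * release:
+ *	pci_ide_stream_release(ide);
+ *	return -ENXIO;
+ */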
+
+struct pci_ide_stream_id {
+ struct pci_host_bridge *hb;
+ u8 stream_id;
+};
+
+static struct pci_ide_stream_id *
+request_stream_id(struct pci_host_bridge *hb, u8 stream_id,
+ struct pci_ide_stream_id *sid)
+{
+ if (!reserve_stream_id(hb, stream_id))
+ return NULL;
+
+ *sid = (struct pci_ide_stream_id) {
+ .hb = hb,
+ .stream_id = stream_id,
+ };
+
+ return sid;
+}
+DEFINE_FREE(free_stream_id, struct pci_ide_stream_id *,
+ if (_T) ida_free(&_T->hb->ide_stream_ids_ida, _T->stream_id))
+
+/**
+ * pci_ide_stream_register() - Prepare to activate an IDE Stream
+ * @ide: IDE settings descriptor
+ *
+ * After a Stream ID has been acquired for @ide, record the presence of
+ * the stream in sysfs. The expectation is that @ide is immutable while
+ * registered.
+ */
+int pci_ide_stream_register(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+ struct pci_ide_stream_id __sid;
+ u8 ep_stream, rp_stream;
+ int rc;
+
+ if (ide->stream_id < 0 || ide->stream_id > U8_MAX) {
+ pci_err(pdev, "Setup fail: Invalid Stream ID: %d\n", ide->stream_id);
+ return -ENXIO;
+ }
+
+ struct pci_ide_stream_id *sid __free(free_stream_id) =
+ request_stream_id(hb, ide->stream_id, &__sid);
+ if (!sid) {
+ pci_err(pdev, "Setup fail: Stream ID %d in use\n", ide->stream_id);
+ return -EBUSY;
+ }
+
+ ep_stream = ide->partner[PCI_IDE_EP].stream_index;
+ rp_stream = ide->partner[PCI_IDE_RP].stream_index;
+ const char *name __free(kfree) = kasprintf(GFP_KERNEL, "stream%d.%d.%d",
+ ide->host_bridge_stream,
+ rp_stream, ep_stream);
+ if (!name)
+ return -ENOMEM;
+
+ rc = sysfs_create_link(&hb->dev.kobj, &pdev->dev.kobj, name);
+ if (rc)
+ return rc;
+
+ ide->name = no_free_ptr(name);
+
+ /* Stream ID reservation recorded in @ide is now successfully registered */
+ retain_and_null_ptr(sid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_register);
+
+/**
+ * pci_ide_stream_unregister() - unwind pci_ide_stream_register()
+ * @ide: idle IDE settings descriptor
+ *
+ * In preparation for freeing @ide, remove sysfs enumeration for the
+ * stream.
+ */
+void pci_ide_stream_unregister(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+
+ sysfs_remove_link(&hb->dev.kobj, ide->name);
+ kfree(ide->name);
+ ida_free(&hb->ide_stream_ids_ida, ide->stream_id);
+ ide->name = NULL;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_unregister);
+
+static int pci_ide_domain(struct pci_dev *pdev)
+{
+ if (pdev->fm_enabled)
+ return pci_domain_nr(pdev->bus);
+ return 0;
+}
+
+struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ if (!pci_is_pcie(pdev)) {
+ pci_warn_once(pdev, "not a PCIe device\n");
+ return NULL;
+ }
+
+ switch (pci_pcie_type(pdev)) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ if (pdev != ide->pdev) {
+ pci_warn_once(pdev, "setup expected Endpoint: %s\n", pci_name(ide->pdev));
+ return NULL;
+ }
+ return &ide->partner[PCI_IDE_EP];
+ case PCI_EXP_TYPE_ROOT_PORT: {
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ if (pdev != rp) {
+ pci_warn_once(pdev, "setup expected Root Port: %s\n",
+ pci_name(rp));
+ return NULL;
+ }
+ return &ide->partner[PCI_IDE_RP];
+ }
+ default:
+ pci_warn_once(pdev, "invalid device type\n");
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(pci_ide_to_settings);
+
+static void set_ide_sel_ctl(struct pci_dev *pdev, struct pci_ide *ide,
+ struct pci_ide_partner *settings, int pos,
+ bool enable)
+{
+ u32 val = FIELD_PREP(PCI_IDE_SEL_CTL_ID, ide->stream_id) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_DEFAULT, settings->default_stream) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_CFG_EN, pdev->ide_cfg) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_TEE_LIMITED, pdev->ide_tee_limit) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_EN, enable);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
+}
+
+#define SEL_ADDR1_LOWER GENMASK(31, 20)
+#define SEL_ADDR_UPPER GENMASK_ULL(63, 32)
+#define PREP_PCI_IDE_SEL_ADDR1(base, limit) \
+ (FIELD_PREP(PCI_IDE_SEL_ADDR_1_VALID, 1) | \
+ FIELD_PREP(PCI_IDE_SEL_ADDR_1_BASE_LOW, \
+ FIELD_GET(SEL_ADDR1_LOWER, (base))) | \
+ FIELD_PREP(PCI_IDE_SEL_ADDR_1_LIMIT_LOW, \
+ FIELD_GET(SEL_ADDR1_LOWER, (limit))))
+
+static void mem_assoc_to_regs(struct pci_bus_region *region,
+ struct pci_ide_regs *regs, int idx)
+{
+ /* convert to u64 range for bitfield size checks */
+ struct range r = { region->start, region->end };
+
+ regs->addr[idx].assoc1 = PREP_PCI_IDE_SEL_ADDR1(r.start, r.end);
+ regs->addr[idx].assoc2 = FIELD_GET(SEL_ADDR_UPPER, r.end);
+ regs->addr[idx].assoc3 = FIELD_GET(SEL_ADDR_UPPER, r.start);
+}
+
+/**
+ * pci_ide_stream_to_regs() - convert IDE settings to association register values
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ * @regs: output register values
+ */
+static void pci_ide_stream_to_regs(struct pci_dev *pdev, struct pci_ide *ide,
+ struct pci_ide_regs *regs)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int assoc_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (!settings)
+ return;
+
+ regs->rid1 = FIELD_PREP(PCI_IDE_SEL_RID_1_LIMIT, settings->rid_end);
+
+ regs->rid2 = FIELD_PREP(PCI_IDE_SEL_RID_2_VALID, 1) |
+ FIELD_PREP(PCI_IDE_SEL_RID_2_BASE, settings->rid_start) |
+ FIELD_PREP(PCI_IDE_SEL_RID_2_SEG, pci_ide_domain(pdev));
+
+ if (pdev->nr_ide_mem && pci_bus_region_size(&settings->mem_assoc)) {
+ mem_assoc_to_regs(&settings->mem_assoc, regs, assoc_idx);
+ assoc_idx++;
+ }
+
+ if (pdev->nr_ide_mem > assoc_idx &&
+ pci_bus_region_size(&settings->pref_assoc)) {
+ mem_assoc_to_regs(&settings->pref_assoc, regs, assoc_idx);
+ assoc_idx++;
+ }
+
+ regs->nr_addr = assoc_idx;
+}
+
+/**
+ * pci_ide_stream_setup() - program settings to Selective IDE Stream registers
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ *
+ * When @pdev is a PCI_EXP_TYPE_ENDPOINT then the PCI_IDE_EP partner
+ * settings are written to @pdev's Selective IDE Stream register block,
+ * and when @pdev is a PCI_EXP_TYPE_ROOT_PORT, the PCI_IDE_RP settings
+ * are selected.
+ */
+void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ struct pci_ide_regs regs;
+ int pos;
+
+ if (!settings)
+ return;
+
+ pci_ide_stream_to_regs(pdev, ide, &regs);
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, regs.rid1);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, regs.rid2);
+
+ for (int i = 0; i < regs.nr_addr; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i),
+ regs.addr[i].assoc1);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i),
+ regs.addr[i].assoc2);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i),
+ regs.addr[i].assoc3);
+ }
+
+ /* clear extra unused address association blocks */
+ for (int i = regs.nr_addr; i < pdev->nr_ide_mem; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
+ }
+
+ /*
+ * Set up the control register early for devices that expect the
+ * Stream ID to be set during key programming.
+ */
+ set_ide_sel_ctl(pdev, ide, settings, pos, false);
+ settings->setup = 1;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_setup);
+
+/**
+ * pci_ide_stream_teardown() - disable the stream and clear all settings
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ *
+ * For stream destruction, zero all registers that may have been written
+ * by pci_ide_stream_setup(). Consider pci_ide_stream_disable() to leave
+ * settings in place while temporarily disabling the stream.
+ */
+void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos, i;
+
+ if (!settings)
+ return;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
+
+ for (i = 0; i < pdev->nr_ide_mem; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
+ }
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, 0);
+ settings->setup = 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_teardown);
+
+/**
+ * pci_ide_stream_enable() - enable a Selective IDE Stream
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered and setup IDE settings descriptor
+ *
+ * Activate the stream by writing to the Selective IDE Stream Control
+ * Register.
+ *
+ * Return: 0 if the stream successfully entered the "secure" state, -EINVAL
+ * if @ide is invalid, or -ENXIO if the stream fails to enter the secure state.
+ *
+ * Note that the state may go "insecure" at any point after returning 0, but
+ * those events are equivalent to a "link down" event and handled via
+ * asynchronous error reporting.
+ *
+ * Caller is responsible for clearing the enable bit in the -ENXIO case.
+ */
+int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos;
+ u32 val;
+
+ if (!settings)
+ return -EINVAL;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ set_ide_sel_ctl(pdev, ide, settings, pos, true);
+ settings->enable = 1;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_STS, &val);
+ if (FIELD_GET(PCI_IDE_SEL_STS_STATE, val) !=
+ PCI_IDE_SEL_STS_STATE_SECURE)
+ return -ENXIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_enable);
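+
+/*
+ * Example (illustrative): per the note above, the caller clears the
+ * enable bit itself when the stream fails to reach the "secure" state:
+ *
+ *	if (pci_ide_stream_enable(pdev, ide)) {
+ *		pci_ide_stream_disable(pdev, ide);
+ *		return -ENXIO;
+ *	}
+ */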
+
+/**
+ * pci_ide_stream_disable() - disable a Selective IDE Stream
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered and setup IDE settings descriptor
+ *
+ * Clear the Selective IDE Stream Control Register, but leave all other
+ * registers untouched.
+ */
+void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos;
+
+ if (!settings)
+ return;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
+ settings->enable = 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_disable);
+
+void pci_ide_init_host_bridge(struct pci_host_bridge *hb)
+{
+ hb->nr_ide_streams = 256;
+ ida_init(&hb->ide_stream_ida);
+ ida_init(&hb->ide_stream_ids_ida);
+ reserve_stream_id(hb, PCI_IDE_RESERVED_STREAM_ID);
+}
+
+static ssize_t available_secure_streams_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_host_bridge *hb = to_pci_host_bridge(dev);
+ int nr = READ_ONCE(hb->nr_ide_streams);
+ int avail = nr;
+
+ if (!nr)
+ return -ENXIO;
+
+ /*
+ * Yes, this is inefficient and racy, but it is only for occasional
+ * platform resource surveys. Worst case is bounded to 256 streams.
+ */
+ for (int i = 0; i < nr; i++)
+ if (ida_exists(&hb->ide_stream_ida, i))
+ avail--;
+ return sysfs_emit(buf, "%d\n", avail);
+}
+static DEVICE_ATTR_RO(available_secure_streams);
+
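+/*
+ * Example (illustrative): survey remaining stream capacity for a host
+ * bridge; the "pci0000:00" device name is a placeholder:
+ *
+ *	# cat /sys/devices/pci0000:00/available_secure_streams
+ *	254
+ */
+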
+static struct attribute *pci_ide_attrs[] = {
+ &dev_attr_available_secure_streams.attr,
+ NULL
+};
+
+static umode_t pci_ide_attr_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_host_bridge *hb = to_pci_host_bridge(dev);
+
+ if (a == &dev_attr_available_secure_streams.attr)
+ if (!hb->nr_ide_streams)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group pci_ide_attr_group = {
+ .attrs = pci_ide_attrs,
+ .is_visible = pci_ide_attr_visible,
+};
+
+/**
+ * pci_ide_set_nr_streams() - sets size of the pool of IDE Stream resources
+ * @hb: host bridge boundary for the stream pool
+ * @nr: number of streams
+ *
+ * Platform PCI init and/or expert test module use only. Limit IDE
+ * Stream establishment by setting the number of stream resources
+ * available at the host bridge. Platform init code must set this before
+ * the first pci_ide_stream_alloc() call if the platform supports fewer
+ * than the default of 256 streams per host bridge.
+ *
+ * The "PCI_IDE" symbol namespace is required because this is typically
+ * a detail that is settled in early PCI init. I.e. this export is not
+ * for endpoint drivers.
+ */
+void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr)
+{
+ hb->nr_ide_streams = min(nr, 256);
+ WARN_ON_ONCE(!ida_is_empty(&hb->ide_stream_ida));
+ sysfs_update_group(&hb->dev.kobj, &pci_ide_attr_group);
+}
+EXPORT_SYMBOL_NS_GPL(pci_ide_set_nr_streams, "PCI_IDE");
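+
+/*
+ * Example (illustrative sketch): platform init code advertises the actual
+ * stream capacity of a root complex; "soc_nr_streams" is a hypothetical
+ * platform-specific value, and modular callers import the namespace:
+ *
+ *	MODULE_IMPORT_NS("PCI_IDE");
+ *
+ *	struct pci_host_bridge *hb = pci_find_host_bridge(bus);
+ *
+ *	pci_ide_set_nr_streams(hb, soc_nr_streams);
+ */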
+
+void pci_ide_destroy(struct pci_dev *pdev)
+{
+ ida_destroy(&pdev->ide_stream_ida);
+}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 80a7c4fe6b03..c2df915ad2d2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1856,5 +1856,9 @@ const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCI_DOE
&pci_doe_sysfs_group,
#endif
+#ifdef CONFIG_PCI_TSM
+ &pci_tsm_auth_attr_group,
+ &pci_tsm_attr_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a33bc4e0bf34..0e67014aa001 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -615,6 +615,27 @@ static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { }
static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { }
#endif
+#ifdef CONFIG_PCI_IDE
+void pci_ide_init(struct pci_dev *dev);
+void pci_ide_init_host_bridge(struct pci_host_bridge *hb);
+void pci_ide_destroy(struct pci_dev *dev);
+extern const struct attribute_group pci_ide_attr_group;
+#else
+static inline void pci_ide_init(struct pci_dev *dev) { }
+static inline void pci_ide_init_host_bridge(struct pci_host_bridge *hb) { }
+static inline void pci_ide_destroy(struct pci_dev *dev) { }
+#endif
+
+#ifdef CONFIG_PCI_TSM
+void pci_tsm_init(struct pci_dev *pdev);
+void pci_tsm_destroy(struct pci_dev *pdev);
+extern const struct attribute_group pci_tsm_attr_group;
+extern const struct attribute_group pci_tsm_auth_attr_group;
+#else
+static inline void pci_tsm_init(struct pci_dev *pdev) { }
+static inline void pci_tsm_destroy(struct pci_dev *pdev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 124d2d309c58..41183aed8f5d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -658,6 +658,18 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(bridge);
}
+static const struct attribute_group *pci_host_bridge_groups[] = {
+#ifdef CONFIG_PCI_IDE
+ &pci_ide_attr_group,
+#endif
+ NULL
+};
+
+static const struct device_type pci_host_bridge_type = {
+ .groups = pci_host_bridge_groups,
+ .release = pci_release_host_bridge_dev,
+};
+
static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
INIT_LIST_HEAD(&bridge->windows);
@@ -677,6 +689,8 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_dpc = 1;
bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
bridge->native_cxl_error = 1;
+ bridge->dev.type = &pci_host_bridge_type;
+ pci_ide_init_host_bridge(bridge);
device_initialize(&bridge->dev);
}
@@ -690,7 +704,6 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
return NULL;
pci_init_host_bridge(bridge);
- bridge->dev.release = pci_release_host_bridge_dev;
return bridge;
}
@@ -2296,6 +2309,17 @@ int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
return 0;
}
+static void pci_dev3_init(struct pci_dev *pdev)
+{
+ u16 cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DEV3);
+ u32 val = 0;
+
+ if (!cap)
+ return;
+ pci_read_config_dword(pdev, cap + PCI_DEV3_STA, &val);
+ pdev->fm_enabled = !!(val & PCI_DEV3_STA_SEGMENT);
+}
+
/**
* pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
* @dev: PCI device to query
@@ -2680,6 +2704,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_doe_init(dev); /* Data Object Exchange */
pci_tph_init(dev); /* TLP Processing Hints */
pci_rebar_init(dev); /* Resizable BAR */
+ pci_dev3_init(dev); /* Device 3 capabilities */
+ pci_ide_init(dev); /* Link Integrity and Data Encryption */
pcie_report_downtraining(dev);
pci_init_reset_methods(dev);
@@ -2773,6 +2799,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
+ /* Establish pdev->tsm for newly added devices (e.g. new SR-IOV VFs) */
+ pci_tsm_init(dev);
+
pci_npem_create(dev);
pci_doe_sysfs_init(dev);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index ce5c25adef55..417a9ea59117 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -57,6 +57,12 @@ static void pci_destroy_dev(struct pci_dev *dev)
pci_doe_sysfs_teardown(dev);
pci_npem_remove(dev);
+ /*
+ * While the device is in D0, drop it from TSM link operations,
+ * including unbind and disconnect (IDE + SPDM teardown).
+ */
+ pci_tsm_destroy(dev);
+
device_del(&dev->dev);
down_write(&pci_bus_sem);
@@ -64,6 +70,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
up_write(&pci_bus_sem);
pci_doe_destroy(dev);
+ pci_ide_destroy(dev);
pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
pci_pwrctrl_unregister(&dev->dev);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 53840634fbfc..e6e84dc62e82 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -282,6 +282,45 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
return pdev;
}
+static struct pci_dev *pci_get_dev_by_id_reverse(const struct pci_device_id *id,
+ struct pci_dev *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct pci_dev *pdev = NULL;
+
+ if (from)
+ dev_start = &from->dev;
+ dev = bus_find_device_reverse(&pci_bus_type, dev_start, (void *)id,
+ match_pci_dev_by_id);
+ if (dev)
+ pdev = to_pci_dev(dev);
+ pci_dev_put(from);
+ return pdev;
+}
+
+enum pci_search_direction {
+ PCI_SEARCH_FORWARD,
+ PCI_SEARCH_REVERSE,
+};
+
+static struct pci_dev *__pci_get_subsys(unsigned int vendor, unsigned int device,
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from, enum pci_search_direction dir)
+{
+ struct pci_device_id id = {
+ .vendor = vendor,
+ .device = device,
+ .subvendor = ss_vendor,
+ .subdevice = ss_device,
+ };
+
+ if (dir == PCI_SEARCH_FORWARD)
+ return pci_get_dev_by_id(&id, from);
+ else
+ return pci_get_dev_by_id_reverse(&id, from);
+}
+
/**
* pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
@@ -302,14 +341,8 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
struct pci_dev *from)
{
- struct pci_device_id id = {
- .vendor = vendor,
- .device = device,
- .subvendor = ss_vendor,
- .subdevice = ss_device,
- };
-
- return pci_get_dev_by_id(&id, from);
+ return __pci_get_subsys(vendor, device, ss_vendor, ss_device, from,
+ PCI_SEARCH_FORWARD);
}
EXPORT_SYMBOL(pci_get_subsys);
@@ -334,6 +367,19 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
}
EXPORT_SYMBOL(pci_get_device);
+/*
+ * Same semantics as pci_get_device(), except walks the PCI device list
+ * in reverse discovery order.
+ */
+struct pci_dev *pci_get_device_reverse(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *from)
+{
+ return __pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from,
+ PCI_SEARCH_REVERSE);
+}
+EXPORT_SYMBOL(pci_get_device_reverse);
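+
+/*
+ * Example (illustrative sketch): iterate matching devices in reverse
+ * discovery order, e.g. to tear down dependent devices before the devices
+ * they depend on; the vendor/device IDs are placeholders:
+ *
+ *	struct pci_dev *pdev = NULL;
+ *
+ *	while ((pdev = pci_get_device_reverse(PCI_VENDOR_ID_INTEL,
+ *					      PCI_ANY_ID, pdev)))
+ *		...;	// reference dropped on the next iteration
+ */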
+
/**
* pci_get_class - begin or continue searching for a PCI device by class
* @class: search for a PCI device with this class designation
diff --git a/drivers/pci/tsm.c b/drivers/pci/tsm.c
new file mode 100644
index 000000000000..5fdcd7f2e820
--- /dev/null
+++ b/drivers/pci/tsm.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interface with platform TEE Security Manager (TSM) objects as defined by
+ * PCIe r7.0 section 11 TEE Device Interface Security Protocol (TDISP)
+ *
+ * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
+ */
+
+#define dev_fmt(fmt) "PCI/TSM: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/pci-tsm.h>
+#include <linux/sysfs.h>
+#include <linux/tsm.h>
+#include <linux/xarray.h>
+#include "pci.h"
+
+/*
+ * Provide a read/write lock against the init / exit of pdev tsm
+ * capabilities and arrival/departure of a TSM instance
+ */
+static DECLARE_RWSEM(pci_tsm_rwsem);
+
+/*
+ * Count of TSMs registered that support physical link operations vs device
+ * security state management.
+ */
+static int pci_tsm_link_count;
+static int pci_tsm_devsec_count;
+
+static const struct pci_tsm_ops *to_pci_tsm_ops(struct pci_tsm *tsm)
+{
+ return tsm->tsm_dev->pci_ops;
+}
+
+static inline bool is_dsm(struct pci_dev *pdev)
+{
+ return pdev->tsm && pdev->tsm->dsm_dev == pdev;
+}
+
+static inline bool has_tee(struct pci_dev *pdev)
+{
+ return pdev->devcap & PCI_EXP_DEVCAP_TEE;
+}
+
+/* 'struct pci_tsm_pf0' wraps 'struct pci_tsm' when ->dsm_dev == ->pdev (self) */
+static struct pci_tsm_pf0 *to_pci_tsm_pf0(struct pci_tsm *tsm)
+{
+ /*
+ * All "link" TSM contexts reference the device that hosts the DSM
+ * interface for a set of devices. Walk to the DSM device and cast its
+ * ->tsm context to a 'struct pci_tsm_pf0 *'.
+ */
+ struct pci_dev *pf0 = tsm->dsm_dev;
+
+ if (!is_pci_tsm_pf0(pf0) || !is_dsm(pf0)) {
+ pci_WARN_ONCE(tsm->pdev, 1, "invalid context object\n");
+ return NULL;
+ }
+
+ return container_of(pf0->tsm, struct pci_tsm_pf0, base_tsm);
+}
+
+static void tsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev;
+
+ if (!tsm)
+ return;
+
+ pdev = tsm->pdev;
+ to_pci_tsm_ops(tsm)->remove(tsm);
+ pdev->tsm = NULL;
+}
+DEFINE_FREE(tsm_remove, struct pci_tsm *, if (_T) tsm_remove(_T))
+
+static void pci_tsm_walk_fns(struct pci_dev *pdev,
+ int (*cb)(struct pci_dev *pdev, void *data),
+ void *data)
+{
+ /* Walk subordinate physical functions */
+ for (int i = 0; i < 8; i++) {
+ struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
+ pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));
+
+ if (!pf)
+ continue;
+
+ /* on entry function 0 has already run @cb */
+ if (i > 0)
+ cb(pf, data);
+
+ /* walk virtual functions of each pf */
+ for (int j = 0; j < pci_num_vf(pf); j++) {
+ struct pci_dev *vf __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(
+ pci_domain_nr(pf->bus),
+ pci_iov_virtfn_bus(pf, j),
+ pci_iov_virtfn_devfn(pf, j));
+
+ if (!vf)
+ continue;
+
+ cb(vf, data);
+ }
+ }
+
+ /*
+ * Walk downstream devices; assumes that an upstream DSM is
+ * limited to downstream physical functions
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
+ pci_walk_bus(pdev->subordinate, cb, data);
+}
+
+static void pci_tsm_walk_fns_reverse(struct pci_dev *pdev,
+ int (*cb)(struct pci_dev *pdev,
+ void *data),
+ void *data)
+{
+ /* Reverse walk downstream devices */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
+ pci_walk_bus_reverse(pdev->subordinate, cb, data);
+
+ /* Reverse walk subordinate physical functions */
+ for (int i = 7; i >= 0; i--) {
+ struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
+ pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));
+
+ if (!pf)
+ continue;
+
+ /* reverse walk virtual functions */
+ for (int j = pci_num_vf(pf) - 1; j >= 0; j--) {
+ struct pci_dev *vf __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(
+ pci_domain_nr(pf->bus),
+ pci_iov_virtfn_bus(pf, j),
+ pci_iov_virtfn_devfn(pf, j));
+
+ if (!vf)
+ continue;
+ cb(vf, data);
+ }
+
+ /* on exit, caller will run @cb on function 0 */
+ if (i > 0)
+ cb(pf, data);
+ }
+}
+
+static void link_sysfs_disable(struct pci_dev *pdev)
+{
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
+}
+
+static void link_sysfs_enable(struct pci_dev *pdev)
+{
+ bool tee = has_tee(pdev);
+
+ pci_dbg(pdev, "%s Security Manager detected (%s%s%s)\n",
+ pdev->tsm ? "Device" : "Platform TEE",
+ pdev->ide_cap ? "IDE" : "", pdev->ide_cap && tee ? " " : "",
+ tee ? "TEE" : "");
+
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
+}
+
+static int probe_fn(struct pci_dev *pdev, void *dsm)
+{
+ struct pci_dev *dsm_dev = dsm;
+ const struct pci_tsm_ops *ops = to_pci_tsm_ops(dsm_dev->tsm);
+
+ pdev->tsm = ops->probe(dsm_dev->tsm->tsm_dev, pdev);
+ pci_dbg(pdev, "setup TSM context: DSM: %s status: %s\n",
+ pci_name(dsm_dev), pdev->tsm ? "success" : "failed");
+ if (pdev->tsm)
+ link_sysfs_enable(pdev);
+ return 0;
+}
+
+static int pci_tsm_connect(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
+{
+ int rc;
+ struct pci_tsm_pf0 *tsm_pf0;
+ const struct pci_tsm_ops *ops = tsm_dev->pci_ops;
+ struct pci_tsm *pci_tsm __free(tsm_remove) = ops->probe(tsm_dev, pdev);
+
+ /* connect() mutually exclusive with subfunction pci_tsm_init() */
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ if (!pci_tsm)
+ return -ENXIO;
+
+ pdev->tsm = pci_tsm;
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+
+ /* mutex_intr assumes connect() is always sysfs/user driven */
+ ACQUIRE(mutex_intr, lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
+ return rc;
+
+ rc = ops->connect(pdev);
+ if (rc)
+ return rc;
+
+ pdev->tsm = no_free_ptr(pci_tsm);
+
+ /*
+ * Now that the DSM is established, probe() all the potential
+ * dependent functions. Failure to probe a function is not fatal
+ * to connect(); it just disables subsequent security operations
+ * for that function.
+ *
+ * Note that this is done unconditionally, without regard to finding
+ * PCI_EXP_DEVCAP_TEE on the dependent function, for robustness. The DSM
+ * is the ultimate arbiter of security state relative to a given
+ * interface id, and if it says it can manage TDISP state of a function,
+ * let it.
+ */
+ if (has_tee(pdev))
+ pci_tsm_walk_fns(pdev, probe_fn, pdev);
+ return 0;
+}
+
+static ssize_t connect_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tsm_dev *tsm_dev;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return sysfs_emit(buf, "\n");
+
+ tsm_dev = pdev->tsm->tsm_dev;
+ return sysfs_emit(buf, "%s\n", dev_name(&tsm_dev->dev));
+}
+
+/* Is @tsm_dev managing physical link / session properties... */
+static bool is_link_tsm(struct tsm_dev *tsm_dev)
+{
+ return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->link_ops.probe;
+}
+
+/* ...or is @tsm_dev managing device security state? */
+static bool is_devsec_tsm(struct tsm_dev *tsm_dev)
+{
+ return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->devsec_ops.lock;
+}
+
+static ssize_t connect_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int rc, id;
+
+ rc = sscanf(buf, "tsm%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
+ return rc;
+
+ if (pdev->tsm)
+ return -EBUSY;
+
+ struct tsm_dev *tsm_dev __free(put_tsm_dev) = find_tsm_dev(id);
+ if (!is_link_tsm(tsm_dev))
+ return -ENXIO;
+
+ rc = pci_tsm_connect(pdev, tsm_dev);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(connect);
+
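+/*
+ * Example (illustrative): userspace connects PF0 of a device to a
+ * registered TSM via the 'tsm/' attribute group, and disconnects by
+ * echoing the same TSM name; the BDF is a placeholder:
+ *
+ *	# echo tsm0 > /sys/bus/pci/devices/0000:3a:00.0/tsm/connect
+ *	# cat /sys/bus/pci/devices/0000:3a:00.0/tsm/connect
+ *	tsm0
+ *	# echo tsm0 > /sys/bus/pci/devices/0000:3a:00.0/tsm/disconnect
+ */
+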
+static int remove_fn(struct pci_dev *pdev, void *data)
+{
+ tsm_remove(pdev->tsm);
+ link_sysfs_disable(pdev);
+ return 0;
+}
+
+/*
+ * Note, this helper returns an error code and takes a @data argument only
+ * for compatibility with the pci_walk_bus() callback prototype;
+ * pci_tsm_unbind() always succeeds.
+ */
+static int __pci_tsm_unbind(struct pci_dev *pdev, void *data)
+{
+ struct pci_tdi *tdi;
+ struct pci_tsm_pf0 *tsm_pf0;
+
+ lockdep_assert_held(&pci_tsm_rwsem);
+
+ if (!pdev->tsm)
+ return 0;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ guard(mutex)(&tsm_pf0->lock);
+
+ tdi = pdev->tsm->tdi;
+ if (!tdi)
+ return 0;
+
+ to_pci_tsm_ops(pdev->tsm)->unbind(tdi);
+ pdev->tsm->tdi = NULL;
+
+ return 0;
+}
+
+void pci_tsm_unbind(struct pci_dev *pdev)
+{
+ guard(rwsem_read)(&pci_tsm_rwsem);
+ __pci_tsm_unbind(pdev, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_unbind);
+
+/**
+ * pci_tsm_bind() - Bind @pdev as a TDI for @kvm
+ * @pdev: PCI device function to bind
+ * @kvm: Private memory attach context
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ *
+ * Context: Caller is responsible for constraining the bind lifetime to the
+ * registered state of the device. For example, pci_tsm_bind() /
+ * pci_tsm_unbind() limited to the VFIO driver bound state of the device.
+ */
+int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id)
+{
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tdi *tdi;
+
+ if (!kvm)
+ return -EINVAL;
+
+ guard(rwsem_read)(&pci_tsm_rwsem);
+
+ if (!pdev->tsm)
+ return -EINVAL;
+
+ if (!is_link_tsm(pdev->tsm->tsm_dev))
+ return -ENXIO;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ guard(mutex)(&tsm_pf0->lock);
+
+ /* Resolve races to bind a TDI */
+ if (pdev->tsm->tdi) {
+ if (pdev->tsm->tdi->kvm != kvm)
+ return -EBUSY;
+ return 0;
+ }
+
+ tdi = to_pci_tsm_ops(pdev->tsm)->bind(pdev, kvm, tdi_id);
+ if (IS_ERR(tdi))
+ return PTR_ERR(tdi);
+
+ pdev->tsm->tdi = tdi;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_bind);
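+
+/*
+ * Example (illustrative sketch): a device-assignment path, e.g. driven by
+ * VFIO, binds the TDI while the device is driver-bound; "kvm" and "vbdf"
+ * (the guest-visible BDF) are provided by that hypothetical path:
+ *
+ *	rc = pci_tsm_bind(pdev, kvm, vbdf);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	pci_tsm_unbind(pdev);	// on teardown / driver unbind
+ */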
+
+/**
+ * pci_tsm_guest_req() - helper to marshal guest requests to the TSM driver
+ * @pdev: @pdev representing a bound tdi
+ * @scope: caller asserts this passthrough request is limited to TDISP operations
+ * @req_in: Input payload forwarded from the guest
+ * @in_len: Length of @req_in
+ * @req_out: Output payload buffer response to the guest
+ * @out_len: Length of @req_out on input, bytes filled in @req_out on output
+ * @tsm_code: Optional TSM arch specific result code for the guest TSM
+ *
+ * This is a common entry point for requests triggered by userspace KVM-exit
+ * service handlers responding to TDI information or state change requests. The
+ * scope parameter limits requests to TDISP state management, or limited debug.
+ * This path is only suitable for commands and results that are the host kernel
+ * has no use, the host is only facilitating guest to TSM communication.
+ *
+ * Returns 0 on success and -error on failure and positive "residue" on success
+ * but @req_out is filled with less then @out_len, or @req_out is NULL and a
+ * residue number of bytes were not consumed from @req_in. On success or
+ * failure @tsm_code may be populated with a TSM implementation specific result
+ * code for the guest to consume.
+ *
+ * Context: Caller is responsible for calling this within the pci_tsm_bind()
+ * state of the TDI.
+ */
+ssize_t pci_tsm_guest_req(struct pci_dev *pdev, enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len, sockptr_t req_out,
+ size_t out_len, u64 *tsm_code)
+{
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tdi *tdi;
+ int rc;
+
+ /* Forbid requests that are not directly related to TDISP operations */
+ if (scope > PCI_TSM_REQ_STATE_CHANGE)
+ return -EINVAL;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return -ENXIO;
+
+ if (!is_link_tsm(pdev->tsm->tsm_dev))
+ return -ENXIO;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
+ return rc;
+
+ tdi = pdev->tsm->tdi;
+ if (!tdi)
+ return -ENXIO;
+ return to_pci_tsm_ops(pdev->tsm)->guest_req(tdi, scope, req_in, in_len,
+ req_out, out_len, tsm_code);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_guest_req);
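+
+/*
+ * Example (illustrative sketch): a KVM-exit service handler forwards an
+ * in-kernel request buffer to the TSM and relays the result code back to
+ * the guest; "req", "rsp", and their lengths are hypothetical:
+ *
+ *	u64 tsm_code;
+ *	ssize_t residue;
+ *
+ *	residue = pci_tsm_guest_req(pdev, PCI_TSM_REQ_STATE_CHANGE,
+ *				    KERNEL_SOCKPTR(req), req_len,
+ *				    KERNEL_SOCKPTR(rsp), rsp_len, &tsm_code);
+ *	if (residue < 0)
+ *		return residue;
+ */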
+
+static void pci_tsm_unbind_all(struct pci_dev *pdev)
+{
+ pci_tsm_walk_fns_reverse(pdev, __pci_tsm_unbind, NULL);
+ __pci_tsm_unbind(pdev, NULL);
+}
+
+static void __pci_tsm_disconnect(struct pci_dev *pdev)
+{
+ struct pci_tsm_pf0 *tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ const struct pci_tsm_ops *ops = to_pci_tsm_ops(pdev->tsm);
+
+ /* disconnect() mutually exclusive with subfunction pci_tsm_init() */
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ pci_tsm_unbind_all(pdev);
+
+ /*
+ * disconnect() is uninterruptible as it may be called for device
+ * teardown
+ */
+ guard(mutex)(&tsm_pf0->lock);
+ pci_tsm_walk_fns_reverse(pdev, remove_fn, NULL);
+ ops->disconnect(pdev);
+}
+
+static void pci_tsm_disconnect(struct pci_dev *pdev)
+{
+ __pci_tsm_disconnect(pdev);
+ tsm_remove(pdev->tsm);
+}
+
+static ssize_t disconnect_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tsm_dev *tsm_dev;
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return -ENXIO;
+
+ tsm_dev = pdev->tsm->tsm_dev;
+ if (!sysfs_streq(buf, dev_name(&tsm_dev->dev)))
+ return -EINVAL;
+
+ pci_tsm_disconnect(pdev);
+ return len;
+}
+static DEVICE_ATTR_WO(disconnect);
+
+static ssize_t bound_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tsm *tsm;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ tsm = pdev->tsm;
+ if (!tsm)
+ return sysfs_emit(buf, "\n");
+ tsm_pf0 = to_pci_tsm_pf0(tsm);
+
+ ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
+ return rc;
+
+ if (!tsm->tdi)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%s\n", dev_name(&tsm->tsm_dev->dev));
+}
+static DEVICE_ATTR_RO(bound);
+
+static ssize_t dsm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_tsm *tsm;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ tsm = pdev->tsm;
+ if (!tsm)
+ return sysfs_emit(buf, "\n");
+
+ return sysfs_emit(buf, "%s\n", pci_name(tsm->dsm_dev));
+}
+static DEVICE_ATTR_RO(dsm);
+
+/* The 'authenticated' attribute is exclusive to the presence of a 'link' TSM */
+static bool pci_tsm_link_group_visible(struct kobject *kobj)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (!pci_tsm_link_count)
+ return false;
+
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ if (is_pci_tsm_pf0(pdev))
+ return true;
+
+ /*
+ * Show 'authenticated' and other attributes for the managed
+ * sub-functions of a DSM.
+ */
+ if (pdev->tsm)
+ return true;
+
+ return false;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_tsm_link);
+
+/*
+ * 'link' and 'devsec' TSMs share the same 'tsm/' sysfs group, so the TSM type
+ * specific attributes need individual visibility checks.
+ */
+static umode_t pci_tsm_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (pci_tsm_link_group_visible(kobj)) {
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (attr == &dev_attr_bound.attr) {
+ if (is_pci_tsm_pf0(pdev) && has_tee(pdev))
+ return attr->mode;
+ if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_dsm.attr) {
+ if (is_pci_tsm_pf0(pdev))
+ return attr->mode;
+ if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_connect.attr ||
+ attr == &dev_attr_disconnect.attr) {
+ if (is_pci_tsm_pf0(pdev))
+ return attr->mode;
+ }
+ }
+
+ return 0;
+}
+
+static bool pci_tsm_group_visible(struct kobject *kobj)
+{
+ return pci_tsm_link_group_visible(kobj);
+}
+DEFINE_SYSFS_GROUP_VISIBLE(pci_tsm);
+
+static struct attribute *pci_tsm_attrs[] = {
+ &dev_attr_connect.attr,
+ &dev_attr_disconnect.attr,
+ &dev_attr_bound.attr,
+ &dev_attr_dsm.attr,
+ NULL
+};
+
+const struct attribute_group pci_tsm_attr_group = {
+ .name = "tsm",
+ .attrs = pci_tsm_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_tsm),
+};
+
+static ssize_t authenticated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ /*
+ * When the SPDM session is established via the TSM, the 'authenticated'
+ * state of the device is identical to the connect state.
+ */
+ return connect_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RO(authenticated);
+
+static struct attribute *pci_tsm_auth_attrs[] = {
+ &dev_attr_authenticated.attr,
+ NULL
+};
+
+const struct attribute_group pci_tsm_auth_attr_group = {
+ .attrs = pci_tsm_auth_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_tsm_link),
+};
+
+/*
+ * Retrieve the physical function 0 device whether or not it has TEE capability
+ */
+static struct pci_dev *pf0_dev_get(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_dev = pci_physfn(pdev);
+
+ if (PCI_FUNC(pf_dev->devfn) == 0)
+ return pci_dev_get(pf_dev);
+
+ return pci_get_slot(pf_dev->bus,
+ pf_dev->devfn - PCI_FUNC(pf_dev->devfn));
+}
+
+/*
+ * Find the PCI Device instance that serves as the Device Security Manager (DSM)
+ * for @pdev. Note that no additional reference is held for the resulting device
+ * because that resulting object always has a registered lifetime
+ * greater-than-or-equal to that of the @pdev argument. This is by virtue of
+ * @pdev being a descendant of, or identical to, the returned DSM device.
+ */
+static struct pci_dev *find_dsm_dev(struct pci_dev *pdev)
+{
+ struct device *grandparent;
+ struct pci_dev *uport;
+
+ if (is_pci_tsm_pf0(pdev))
+ return pdev;
+
+ struct pci_dev *pf0 __free(pci_dev_put) = pf0_dev_get(pdev);
+ if (!pf0)
+ return NULL;
+
+ if (is_dsm(pf0))
+ return pf0;
+
+ /*
+ * For cases where a switch may be hosting TDISP services on behalf of
+ * downstream devices, check the first upstream port relative to this
+ * endpoint.
+ */
+ if (!pdev->dev.parent)
+ return NULL;
+ grandparent = pdev->dev.parent->parent;
+ if (!grandparent)
+ return NULL;
+ if (!dev_is_pci(grandparent))
+ return NULL;
+ uport = to_pci_dev(grandparent);
+ if (!pci_is_pcie(uport) ||
+ pci_pcie_type(uport) != PCI_EXP_TYPE_UPSTREAM)
+ return NULL;
+
+ if (is_dsm(uport))
+ return uport;
+ return NULL;
+}
+
+/**
+ * pci_tsm_tdi_constructor() - base 'struct pci_tdi' initialization for link TSMs
+ * @pdev: PCI device function representing the TDI
+ * @tdi: context to initialize
+ * @kvm: Private memory attach context
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ */
+void pci_tsm_tdi_constructor(struct pci_dev *pdev, struct pci_tdi *tdi,
+ struct kvm *kvm, u32 tdi_id)
+{
+ tdi->pdev = pdev;
+ tdi->kvm = kvm;
+ tdi->tdi_id = tdi_id;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_tdi_constructor);
+
+/**
+ * pci_tsm_link_constructor() - base 'struct pci_tsm' initialization for link TSMs
+ * @pdev: The PCI device
+ * @tsm: context to initialize
+ * @tsm_dev: Platform TEE Security Manager, initiator of security operations
+ */
+int pci_tsm_link_constructor(struct pci_dev *pdev, struct pci_tsm *tsm,
+ struct tsm_dev *tsm_dev)
+{
+ if (!is_link_tsm(tsm_dev))
+ return -EINVAL;
+
+ tsm->dsm_dev = find_dsm_dev(pdev);
+ if (!tsm->dsm_dev) {
+ pci_warn(pdev, "failed to find Device Security Manager\n");
+ return -ENXIO;
+ }
+ tsm->pdev = pdev;
+ tsm->tsm_dev = tsm_dev;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_link_constructor);
+
+/**
+ * pci_tsm_pf0_constructor() - common 'struct pci_tsm_pf0' (DSM) initialization
+ * @pdev: Physical Function 0 PCI device (as indicated by is_pci_tsm_pf0())
+ * @tsm: context to initialize
+ * @tsm_dev: Platform TEE Security Manager, initiator of security operations
+ */
+int pci_tsm_pf0_constructor(struct pci_dev *pdev, struct pci_tsm_pf0 *tsm,
+ struct tsm_dev *tsm_dev)
+{
+ mutex_init(&tsm->lock);
+ tsm->doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_CMA);
+ if (!tsm->doe_mb) {
+ pci_warn(pdev, "TSM init failure, no CMA mailbox\n");
+ return -ENODEV;
+ }
+
+ return pci_tsm_link_constructor(pdev, &tsm->base_tsm, tsm_dev);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_pf0_constructor);
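+
+/*
+ * Example (illustrative sketch): a TSM driver's probe() op embeds 'struct
+ * pci_tsm_pf0' in its own context for DSM-capable function 0 devices
+ * ("struct my_dsm" is hypothetical); dependent functions would be wrapped
+ * with pci_tsm_link_constructor() similarly:
+ *
+ *	struct my_dsm {
+ *		struct pci_tsm_pf0 pf0;
+ *		...
+ *	};
+ *
+ *	static struct pci_tsm *my_probe(struct tsm_dev *tsm_dev,
+ *					struct pci_dev *pdev)
+ *	{
+ *		struct my_dsm *dsm = kzalloc(sizeof(*dsm), GFP_KERNEL);
+ *
+ *		if (!dsm)
+ *			return NULL;
+ *		if (pci_tsm_pf0_constructor(pdev, &dsm->pf0, tsm_dev)) {
+ *			kfree(dsm);
+ *			return NULL;
+ *		}
+ *		return &dsm->pf0.base_tsm;
+ *	}
+ */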
+
+void pci_tsm_pf0_destructor(struct pci_tsm_pf0 *pf0_tsm)
+{
+ mutex_destroy(&pf0_tsm->lock);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_pf0_destructor);
+
+int pci_tsm_register(struct tsm_dev *tsm_dev)
+{
+ struct pci_dev *pdev = NULL;
+
+ if (!tsm_dev)
+ return -EINVAL;
+
+ /* The TSM device must implement exactly one of link_ops or devsec_ops */
+ if (!is_link_tsm(tsm_dev) && !is_devsec_tsm(tsm_dev))
+ return -EINVAL;
+
+ if (is_link_tsm(tsm_dev) && is_devsec_tsm(tsm_dev))
+ return -EINVAL;
+
+ guard(rwsem_write)(&pci_tsm_rwsem);
+
+ /* On first enable, update sysfs groups */
+ if (is_link_tsm(tsm_dev) && pci_tsm_link_count++ == 0) {
+ for_each_pci_dev(pdev)
+ if (is_pci_tsm_pf0(pdev))
+ link_sysfs_enable(pdev);
+ } else if (is_devsec_tsm(tsm_dev)) {
+ pci_tsm_devsec_count++;
+ }
+
+ return 0;
+}
+
+static void pci_tsm_fn_exit(struct pci_dev *pdev)
+{
+ __pci_tsm_unbind(pdev, NULL);
+ tsm_remove(pdev->tsm);
+}
+
+/**
+ * __pci_tsm_destroy() - destroy the TSM context for @pdev
+ * @pdev: device to cleanup
+ * @tsm_dev: the TSM device being removed, or NULL if @pdev is being removed.
+ *
+ * At device removal or TSM unregistration all established context
+ * with the TSM is torn down. Additionally, if there are no more TSMs
+ * registered, the PCI tsm/ sysfs attributes are hidden.
+ */
+static void __pci_tsm_destroy(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
+{
+ struct pci_tsm *tsm = pdev->tsm;
+
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ /*
+ * First, handle the TSM removal case to shut down @pdev sysfs; this is
+ * skipped if the device itself is being removed since sysfs goes away
+ * naturally at that point.
+ */
+ if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev) && !pci_tsm_link_count)
+ link_sysfs_disable(pdev);
+
+ /* Nothing else to do if this device never attached to the departing TSM */
+ if (!tsm)
+ return;
+
+ /* Now lookup the tsm_dev to destroy TSM context */
+ if (!tsm_dev)
+ tsm_dev = tsm->tsm_dev;
+ else if (tsm_dev != tsm->tsm_dev)
+ return;
+
+ if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev))
+ pci_tsm_disconnect(pdev);
+ else
+ pci_tsm_fn_exit(pdev);
+}
+
+void pci_tsm_destroy(struct pci_dev *pdev)
+{
+ guard(rwsem_write)(&pci_tsm_rwsem);
+ __pci_tsm_destroy(pdev, NULL);
+}
+
+void pci_tsm_init(struct pci_dev *pdev)
+{
+ guard(rwsem_read)(&pci_tsm_rwsem);
+
+ /*
+ * Subfunctions are probed either synchronously with connect(), or later
+ * when the SR-IOV configuration changes, or, in the unlikely case that
+ * connect() raced initial bus scanning.
+ */
+ if (pdev->tsm)
+ return;
+
+ if (pci_tsm_link_count) {
+ struct pci_dev *dsm = find_dsm_dev(pdev);
+
+ if (!dsm)
+ return;
+
+ /*
+ * The only path to init a Device Security Manager capable
+ * device is via connect().
+ */
+ if (!dsm->tsm)
+ return;
+
+ probe_fn(pdev, dsm);
+ }
+}
+
+void pci_tsm_unregister(struct tsm_dev *tsm_dev)
+{
+ struct pci_dev *pdev = NULL;
+
+ guard(rwsem_write)(&pci_tsm_rwsem);
+ if (is_link_tsm(tsm_dev))
+ pci_tsm_link_count--;
+ if (is_devsec_tsm(tsm_dev))
+ pci_tsm_devsec_count--;
+ for_each_pci_dev_reverse(pdev)
+ __pci_tsm_destroy(pdev, tsm_dev);
+}
+
+int pci_tsm_doe_transfer(struct pci_dev *pdev, u8 type, const void *req,
+ size_t req_sz, void *resp, size_t resp_sz)
+{
+ struct pci_tsm_pf0 *tsm;
+
+ if (!pdev->tsm || !is_pci_tsm_pf0(pdev))
+ return -ENXIO;
+
+ tsm = to_pci_tsm_pf0(pdev->tsm);
+ if (!tsm->doe_mb)
+ return -ENXIO;
+
+ return pci_doe(tsm->doe_mb, PCI_VENDOR_ID_PCI_SIG, type, req, req_sz,
+ resp, resp_sz);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_doe_transfer);
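+
+/*
+ * Example (illustrative): a TSM driver relays an SPDM/CMA message for a
+ * connected DSM over the CMA DOE mailbox; the request/response buffers
+ * and sizes are hypothetical:
+ *
+ *	rc = pci_tsm_doe_transfer(pdev, PCI_DOE_FEATURE_CMA, req, req_sz,
+ *				  resp, resp_sz);
+ */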
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index d8c848cf09a6..52eb7e4ba71f 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -47,6 +47,6 @@ source "drivers/virt/nitro_enclaves/Kconfig"
source "drivers/virt/acrn/Kconfig"
-source "drivers/virt/coco/Kconfig"
-
endif
+
+source "drivers/virt/coco/Kconfig"
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
index 819a97e8ba99..df1cfaf26c65 100644
--- a/drivers/virt/coco/Kconfig
+++ b/drivers/virt/coco/Kconfig
@@ -3,6 +3,7 @@
# Confidential computing related collateral
#
+if VIRT_DRIVERS
source "drivers/virt/coco/efi_secret/Kconfig"
source "drivers/virt/coco/pkvm-guest/Kconfig"
@@ -14,3 +15,7 @@ source "drivers/virt/coco/tdx-guest/Kconfig"
source "drivers/virt/coco/arm-cca-guest/Kconfig"
source "drivers/virt/coco/guest/Kconfig"
+endif
+
+config TSM
+ bool
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
index f918bbb61737..cb52021912b3 100644
--- a/drivers/virt/coco/Makefile
+++ b/drivers/virt/coco/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/
obj-$(CONFIG_SEV_GUEST) += sev-guest/
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/
+obj-$(CONFIG_TSM) += tsm-core.o
obj-$(CONFIG_TSM_GUEST) += guest/
diff --git a/drivers/virt/coco/tsm-core.c b/drivers/virt/coco/tsm-core.c
new file mode 100644
index 000000000000..f027876a2f19
--- /dev/null
+++ b/drivers/virt/coco/tsm-core.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/tsm.h>
+#include <linux/pci.h>
+#include <linux/rwsem.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/cleanup.h>
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+
+static struct class *tsm_class;
+static DECLARE_RWSEM(tsm_rwsem);
+static DEFINE_IDA(tsm_ida);
+
+static int match_id(struct device *dev, const void *data)
+{
+ struct tsm_dev *tsm_dev = container_of(dev, struct tsm_dev, dev);
+ int id = *(const int *)data;
+
+ return tsm_dev->id == id;
+}
+
+struct tsm_dev *find_tsm_dev(int id)
+{
+ struct device *dev = class_find_device(tsm_class, NULL, &id, match_id);
+
+ if (!dev)
+ return NULL;
+ return container_of(dev, struct tsm_dev, dev);
+}
+
+static struct tsm_dev *alloc_tsm_dev(struct device *parent)
+{
+ struct device *dev;
+ int id;
+
+ struct tsm_dev *tsm_dev __free(kfree) =
+ kzalloc(sizeof(*tsm_dev), GFP_KERNEL);
+ if (!tsm_dev)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&tsm_ida, GFP_KERNEL);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ tsm_dev->id = id;
+ dev = &tsm_dev->dev;
+ dev->parent = parent;
+ dev->class = tsm_class;
+ device_initialize(dev);
+
+ return no_free_ptr(tsm_dev);
+}
+
+static struct tsm_dev *tsm_register_pci_or_reset(struct tsm_dev *tsm_dev,
+ struct pci_tsm_ops *pci_ops)
+{
+ int rc;
+
+ if (!pci_ops)
+ return tsm_dev;
+
+ tsm_dev->pci_ops = pci_ops;
+ rc = pci_tsm_register(tsm_dev);
+ if (rc) {
+ dev_err(tsm_dev->dev.parent,
+ "PCI/TSM registration failure: %d\n", rc);
+ device_unregister(&tsm_dev->dev);
+ return ERR_PTR(rc);
+ }
+
+ /* Notify TSM userspace that PCI/TSM operations are now possible */
+ kobject_uevent(&tsm_dev->dev.kobj, KOBJ_CHANGE);
+ return tsm_dev;
+}
+
+struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *pci_ops)
+{
+ struct tsm_dev *tsm_dev __free(put_tsm_dev) = alloc_tsm_dev(parent);
+ struct device *dev;
+ int rc;
+
+ if (IS_ERR(tsm_dev))
+ return tsm_dev;
+
+ dev = &tsm_dev->dev;
+ rc = dev_set_name(dev, "tsm%d", tsm_dev->id);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = device_add(dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return tsm_register_pci_or_reset(no_free_ptr(tsm_dev), pci_ops);
+}
+EXPORT_SYMBOL_GPL(tsm_register);
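+
+/*
+ * Example (illustrative sketch): a low-level TSM driver registers PCI link
+ * operations at platform-device probe time. The callback names mirror the
+ * ops the PCI/TSM core invokes (probe/remove/connect/disconnect), and the
+ * .link_ops grouping assumes the layout implied by is_link_tsm() in
+ * drivers/pci/tsm.c; see <linux/pci-tsm.h> for the authoritative
+ * definition:
+ *
+ *	static struct pci_tsm_ops my_pci_ops = {
+ *		.link_ops = {
+ *			.probe = my_probe,
+ *			.remove = my_remove,
+ *			.connect = my_connect,
+ *			.disconnect = my_disconnect,
+ *		},
+ *	};
+ *
+ *	tsm_dev = tsm_register(&pdev->dev, &my_pci_ops);
+ *	if (IS_ERR(tsm_dev))
+ *		return PTR_ERR(tsm_dev);
+ */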
+
+void tsm_unregister(struct tsm_dev *tsm_dev)
+{
+ if (tsm_dev->pci_ops)
+ pci_tsm_unregister(tsm_dev);
+ device_unregister(&tsm_dev->dev);
+}
+EXPORT_SYMBOL_GPL(tsm_unregister);
+
+/* must be invoked between tsm_register / tsm_unregister */
+int tsm_ide_stream_register(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_tsm *tsm = pdev->tsm;
+ struct tsm_dev *tsm_dev = tsm->tsm_dev;
+ int rc;
+
+ rc = sysfs_create_link(&tsm_dev->dev.kobj, &pdev->dev.kobj, ide->name);
+ if (rc)
+ return rc;
+
+ ide->tsm_dev = tsm_dev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tsm_ide_stream_register);
+
+void tsm_ide_stream_unregister(struct pci_ide *ide)
+{
+ struct tsm_dev *tsm_dev = ide->tsm_dev;
+
+ ide->tsm_dev = NULL;
+ sysfs_remove_link(&tsm_dev->dev.kobj, ide->name);
+}
+EXPORT_SYMBOL_GPL(tsm_ide_stream_unregister);
+
+static void tsm_release(struct device *dev)
+{
+ struct tsm_dev *tsm_dev = container_of(dev, typeof(*tsm_dev), dev);
+
+ ida_free(&tsm_ida, tsm_dev->id);
+ kfree(tsm_dev);
+}
+
+static int __init tsm_init(void)
+{
+ tsm_class = class_create("tsm");
+ if (IS_ERR(tsm_class))
+ return PTR_ERR(tsm_class);
+
+ tsm_class->dev_release = tsm_release;
+ return 0;
+}
+module_init(tsm_init);
+
+static void __exit tsm_exit(void)
+{
+ class_destroy(tsm_class);
+}
+module_exit(tsm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TEE Security Manager Class Device");