Diffstat (limited to 'drivers/gpu/drm/xe/xe_sriov_packet.c')
| -rw-r--r-- | drivers/gpu/drm/xe/xe_sriov_packet.c | 293 |
1 file changed, 293 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xe/xe_sriov_packet.c b/drivers/gpu/drm/xe/xe_sriov_packet.c
index 1ac7e025e9fd..c612f37e9707 100644
--- a/drivers/gpu/drm/xe/xe_sriov_packet.c
+++ b/drivers/gpu/drm/xe/xe_sriov_packet.c
@@ -8,6 +8,67 @@
 #include "xe_printk.h"
 #include "xe_sriov_packet.h"
 #include "xe_sriov_packet_types.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+#include "xe_sriov_printk.h"
+
+static struct mutex *pf_migration_mutex(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+
+	return &xe->sriov.pf.vfs[vfid].migration.lock;
+}
+
+static struct xe_sriov_packet **pf_pick_pending(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+	lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+	return &xe->sriov.pf.vfs[vfid].migration.pending;
+}
+
+static struct xe_sriov_packet **
+pf_pick_descriptor(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+	lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+	return &xe->sriov.pf.vfs[vfid].migration.descriptor;
+}
+
+static struct xe_sriov_packet **pf_pick_trailer(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+	lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+	return &xe->sriov.pf.vfs[vfid].migration.trailer;
+}
+
+static struct xe_sriov_packet **pf_pick_read_packet(struct xe_device *xe,
+						    unsigned int vfid)
+{
+	struct xe_sriov_packet **data;
+
+	data = pf_pick_descriptor(xe, vfid);
+	if (*data)
+		return data;
+
+	data = pf_pick_pending(xe, vfid);
+	if (!*data)
+		*data = xe_sriov_pf_migration_save_consume(xe, vfid);
+	if (*data)
+		return data;
+
+	data = pf_pick_trailer(xe, vfid);
+	if (*data)
+		return data;
+
+	return NULL;
+}
 
 static bool pkt_needs_bo(struct xe_sriov_packet *data)
 {
@@ -135,3 +196,235 @@ int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data)
 
 	return pkt_init(data);
 }
+
+static ssize_t pkt_hdr_read(struct xe_sriov_packet *data,
+			    char __user *buf, size_t len)
+{
+	loff_t offset = sizeof(data->hdr) - data->hdr_remaining;
+
+	if (!data->hdr_remaining)
+		return -EINVAL;
+
+	if (len > data->hdr_remaining)
+		len = data->hdr_remaining;
+
+	if (copy_to_user(buf, (void *)&data->hdr + offset, len))
+		return -EFAULT;
+
+	data->hdr_remaining -= len;
+
+	return len;
+}
+
+static ssize_t pkt_data_read(struct xe_sriov_packet *data,
+			     char __user *buf, size_t len)
+{
+	if (len > data->remaining)
+		len = data->remaining;
+
+	if (copy_to_user(buf, data->vaddr + (data->hdr.size - data->remaining), len))
+		return -EFAULT;
+
+	data->remaining -= len;
+
+	return len;
+}
+
+static ssize_t pkt_read_single(struct xe_sriov_packet **data,
+			       unsigned int vfid, char __user *buf, size_t len)
+{
+	ssize_t copied = 0;
+
+	if ((*data)->hdr_remaining)
+		copied = pkt_hdr_read(*data, buf, len);
+	else
+		copied = pkt_data_read(*data, buf, len);
+
+	if ((*data)->remaining == 0 && (*data)->hdr_remaining == 0) {
+		xe_sriov_packet_free(*data);
+		*data = NULL;
+	}
+
+	return copied;
+}
+
+/**
+ * xe_sriov_packet_read_single() - Read migration data from a single packet.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that have been successfully read,
+ *         0 if no more migration data is available,
+ *         -errno on failure.
+ */
+ssize_t xe_sriov_packet_read_single(struct xe_device *xe, unsigned int vfid,
+				    char __user *buf, size_t len)
+{
+	struct xe_sriov_packet **data = pf_pick_read_packet(xe, vfid);
+
+	if (!data)
+		return -ENODATA;
+	if (IS_ERR(*data))
+		return PTR_ERR(*data);
+
+	return pkt_read_single(data, vfid, buf, len);
+}
+
+static ssize_t pkt_hdr_write(struct xe_sriov_packet *data,
+			     const char __user *buf, size_t len)
+{
+	loff_t offset = sizeof(data->hdr) - data->hdr_remaining;
+	int ret;
+
+	if (len > data->hdr_remaining)
+		len = data->hdr_remaining;
+
+	if (copy_from_user((void *)&data->hdr + offset, buf, len))
+		return -EFAULT;
+
+	data->hdr_remaining -= len;
+
+	if (!data->hdr_remaining) {
+		ret = xe_sriov_packet_init_from_hdr(data);
+		if (ret)
+			return ret;
+	}
+
+	return len;
+}
+
+static ssize_t pkt_data_write(struct xe_sriov_packet *data,
+			      const char __user *buf, size_t len)
+{
+	if (len > data->remaining)
+		len = data->remaining;
+
+	if (copy_from_user(data->vaddr + (data->hdr.size - data->remaining), buf, len))
+		return -EFAULT;
+
+	data->remaining -= len;
+
+	return len;
+}
+
+/**
+ * xe_sriov_packet_write_single() - Write migration data to a single packet.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that have been successfully written,
+ *         -errno on failure.
+ */
+ssize_t xe_sriov_packet_write_single(struct xe_device *xe, unsigned int vfid,
+				     const char __user *buf, size_t len)
+{
+	struct xe_sriov_packet **data = pf_pick_pending(xe, vfid);
+	int ret;
+	ssize_t copied;
+
+	if (IS_ERR_OR_NULL(*data)) {
+		*data = xe_sriov_packet_alloc(xe);
+		if (!*data)
+			return -ENOMEM;
+	}
+
+	if ((*data)->hdr_remaining)
+		copied = pkt_hdr_write(*data, buf, len);
+	else
+		copied = pkt_data_write(*data, buf, len);
+
+	if ((*data)->hdr_remaining == 0 && (*data)->remaining == 0) {
+		ret = xe_sriov_pf_migration_restore_produce(xe, vfid, *data);
+		if (ret) {
+			xe_sriov_packet_free(*data);
+			return ret;
+		}
+
+		*data = NULL;
+	}
+
+	return copied;
+}
+
+#define MIGRATION_DESCRIPTOR_DWORDS 0
+static int pf_descriptor_init(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_packet **desc = pf_pick_descriptor(xe, vfid);
+	struct xe_sriov_packet *data;
+	int ret;
+
+	data = xe_sriov_packet_alloc(xe);
+	if (!data)
+		return -ENOMEM;
+
+	ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_DESCRIPTOR,
+				   0, MIGRATION_DESCRIPTOR_DWORDS * sizeof(u32));
+	if (ret) {
+		xe_sriov_packet_free(data);
+		return ret;
+	}
+
+	*desc = data;
+
+	return 0;
+}
+
+static void pf_pending_init(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_packet **data = pf_pick_pending(xe, vfid);
+
+	*data = NULL;
+}
+
+#define MIGRATION_TRAILER_SIZE 0
+static int pf_trailer_init(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_packet **trailer = pf_pick_trailer(xe, vfid);
+	struct xe_sriov_packet *data;
+	int ret;
+
+	data = xe_sriov_packet_alloc(xe);
+	if (!data)
+		return -ENOMEM;
+
+	ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_TRAILER,
+				   0, MIGRATION_TRAILER_SIZE);
+	if (ret) {
+		xe_sriov_packet_free(data);
+		return ret;
+	}
+
+	*trailer = data;
+
+	return 0;
+}
+
+/**
+ * xe_sriov_packet_save_init() - Initialize the pending save migration packets.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int xe_sriov_packet_save_init(struct xe_device *xe, unsigned int vfid)
+{
+	int ret;
+
+	scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) {
+		ret = pf_descriptor_init(xe, vfid);
+		if (ret)
+			return ret;
+
+		ret = pf_trailer_init(xe, vfid);
+		if (ret)
+			return ret;
+
+		pf_pending_init(xe, vfid);
+	}
+
+	return 0;
+}
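
For illustration only, here is a minimal sketch of how a save-side caller might drain the per-VF migration stream using xe_sriov_packet_read_single(). The helper name pf_migration_stream_read() is hypothetical and not part of this patch; the sketch assumes the caller takes the per-VF migration lock (which pf_pick_read_packet() asserts is held) and treats -ENODATA as end of stream, matching the code above.

/*
 * Hypothetical caller sketch (not part of this patch): drains the VF
 * migration stream into a userspace buffer by reading single packets,
 * header first, then payload, under the per-VF migration lock.
 */
static ssize_t pf_migration_stream_read(struct xe_device *xe, unsigned int vfid,
					char __user *buf, size_t len)
{
	ssize_t copied = 0;
	ssize_t ret;

	scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) {
		while (len) {
			ret = xe_sriov_packet_read_single(xe, vfid, buf, len);
			if (ret == -ENODATA)
				break;			/* no more migration data for now */
			if (ret < 0)
				return copied ?: ret;	/* report error unless partial data was copied */
			if (!ret)
				break;

			buf += ret;
			len -= ret;
			copied += ret;
		}
	}

	return copied;
}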

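Similarly, a restore-side caller could feed userspace data into pending packets with xe_sriov_packet_write_single(), which fills the packet header before the payload and hands each completed packet to xe_sriov_pf_migration_restore_produce() as shown in the diff. The sketch below is an assumption-based illustration; pf_migration_stream_write() is a hypothetical name, not part of this patch.

/*
 * Hypothetical caller sketch (not part of this patch): pushes restore
 * migration data from userspace into pending packets under the per-VF
 * migration lock, which pf_pick_pending() expects to be held.
 */
static ssize_t pf_migration_stream_write(struct xe_device *xe, unsigned int vfid,
					 const char __user *buf, size_t len)
{
	ssize_t copied = 0;
	ssize_t ret;

	scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) {
		while (len) {
			ret = xe_sriov_packet_write_single(xe, vfid, buf, len);
			if (ret < 0)
				return copied ?: ret;	/* report error unless partial data was accepted */
			if (!ret)
				break;

			buf += ret;
			len -= ret;
			copied += ret;
		}
	}

	return copied;
}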