Diffstat (limited to 'drivers/accel/ivpu/ivpu_gem.c')
-rw-r--r--	drivers/accel/ivpu/ivpu_gem.c	161
1 file changed, 117 insertions(+), 44 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 59cfcf3eaded..ece68f570b7e 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -15,6 +15,7 @@
#include <drm/drm_utils.h>
#include "ivpu_drv.h"
+#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
@@ -27,8 +28,8 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
ivpu_dbg(vdev, BO,
- "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
- action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
+ "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n",
+ action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
(bool)drm_gem_is_imported(&bo->base.base));
}
@@ -43,22 +44,47 @@ static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
dma_resv_unlock(bo->base.base.resv);
}
+static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo)
+{
+ struct sg_table *sgt;
+
+ drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach);
+
+ ivpu_bo_lock(bo);
+
+ sgt = bo->base.sgt;
+ if (!sgt) {
+ sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt));
+ else
+ bo->base.sgt = sgt;
+ }
+
+ ivpu_bo_unlock(bo);
+
+ return sgt;
+}
+
/*
- * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+ * ivpu_bo_bind() - pin the backing physical pages and map them to VPU.
*
* This function pins physical memory pages, then maps the physical pages
* to IOMMU address space and finally updates the VPU MMU page tables
* to allow the VPU to translate VPU address to IOMMU address.
*/
-int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
int ret = 0;
- ivpu_dbg_bo(vdev, bo, "pin");
+ ivpu_dbg_bo(vdev, bo, "bind");
- sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (bo->base.base.import_attach)
+ sgt = ivpu_bo_map_attachment(vdev, bo);
+ else
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
@@ -70,7 +96,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
if (!bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
- ivpu_bo_is_snooped(bo));
+ ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
goto unlock;
@@ -99,9 +125,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
+ bo->ctx_id = ctx->id;
bo->vpu_addr = bo->mm_node.start;
- } else {
- ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
+ ivpu_dbg_bo(vdev, bo, "vaddr");
}
ivpu_bo_unlock(bo);
@@ -115,7 +141,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
+ dma_resv_assert_held(bo->base.base.resv);
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -130,13 +156,15 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
bo->ctx = NULL;
}
- if (drm_gem_is_imported(&bo->base.base))
- return;
-
if (bo->base.sgt) {
- dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
- sg_free_table(bo->base.sgt);
- kfree(bo->base.sgt);
+ if (bo->base.base.import_attach) {
+ dma_buf_unmap_attachment(bo->base.base.import_attach,
+ bo->base.sgt, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ }
bo->base.sgt = NULL;
}
}
@@ -182,10 +210,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
struct device *attach_dev = dev->dev;
struct dma_buf_attachment *attach;
- struct sg_table *sgt;
struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
int ret;
attach = dma_buf_attach(dma_buf, attach_dev);
@@ -194,25 +223,25 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto fail_detach;
- }
-
- obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
- goto fail_unmap;
+ goto fail_detach;
}
obj->import_attach = attach;
obj->resv = dma_buf->resv;
+ bo = to_ivpu_bo(obj);
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
+
+ ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo));
+
return obj;
-fail_unmap:
- dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
@@ -220,7 +249,7 @@ fail_detach:
return ERR_PTR(ret);
}
-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
@@ -238,7 +267,6 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
return ERR_CAST(shmem);
bo = to_ivpu_bo(&shmem->base);
- bo->ctx_id = ctx_id;
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
bo->flags = flags;
@@ -246,7 +274,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
- ivpu_dbg_bo(vdev, bo, "alloc");
+ ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size);
return bo;
}
@@ -259,8 +287,8 @@ static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file)
struct ivpu_addr_range *range;
if (bo->ctx) {
- ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
- file_priv->ctx.id, bo->ctx->id);
+ ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n",
+ bo, file_priv->ctx.id, bo->ctx->id);
return -EALREADY;
}
@@ -281,23 +309,41 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
ivpu_dbg_bo(vdev, bo, "free");
+ drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node));
+
mutex_lock(&vdev->bo_list_lock);
list_del(&bo->bo_list_node);
- mutex_unlock(&vdev->bo_list_lock);
drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
drm_WARN_ON(&vdev->drm, bo->base.vaddr);
+ ivpu_bo_lock(bo);
ivpu_bo_unbind_locked(bo);
+ ivpu_bo_unlock(bo);
+
+ mutex_unlock(&vdev->bo_list_lock);
+
drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
drm_WARN_ON(&vdev->drm, bo->ctx);
drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+ drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node);
drm_gem_shmem_free(&bo->base);
}
+static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (ivpu_bo_is_resident(bo))
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ return status;
+}
+
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_gem_bo_free,
.open = ivpu_gem_bo_open,
@@ -308,6 +354,7 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .status = ivpu_gem_status,
.vm_ops = &drm_gem_shmem_vm_ops,
};
@@ -320,25 +367,33 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
struct ivpu_bo *bo;
int ret;
- if (args->flags & ~DRM_IVPU_BO_FLAGS)
+ if (args->flags & ~DRM_IVPU_BO_FLAGS) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags);
return -EINVAL;
+ }
- if (size == 0)
+ if (size == 0) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size);
return -EINVAL;
+ }
- bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
+ bo = ivpu_bo_alloc(vdev, size, args->flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0);
+
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
- if (ret)
- ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
bo, file_priv->ctx.id, args->size, args->flags);
- else
+ } else {
args->vpu_addr = bo->vpu_addr;
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1);
+ }
drm_gem_object_put(&bo->base.base);
@@ -360,18 +415,21 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
- bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ bo = ivpu_bo_alloc(vdev, size, flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n",
bo, range->start, size, flags);
return NULL;
}
ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
- if (ret)
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n",
+ bo, ctx->id, size, ret);
goto err_put;
+ }
- ret = ivpu_bo_pin(bo);
+ ret = ivpu_bo_bind(bo);
if (ret)
goto err_put;
@@ -391,6 +449,21 @@ err_put:
return NULL;
}
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags)
+{
+ struct ivpu_addr_range range;
+
+ if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size);
+ return NULL;
+ }
+
+ if (ivpu_hw_range_init(vdev, &range, addr, size))
+ return NULL;
+
+ return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags);
+}
+
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
{
return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
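
For illustration, a minimal caller sketch (not part of this patch) of how the ivpu_bo_create_runtime() helper added above might be used. The example_* functions and EXAMPLE_* constants are hypothetical placeholders; the sketch assumes ivpu_bo_create_runtime() is declared in ivpu_gem.h and that the included headers provide DRM_IVPU_BO_WC and drm_gem_object_put().

/*
 * Illustrative sketch only, not part of this patch.
 * EXAMPLE_RUNTIME_ADDR and EXAMPLE_RUNTIME_SIZE are placeholder values.
 */
#include <linux/sizes.h>

#include <drm/drm_gem.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"

#define EXAMPLE_RUNTIME_ADDR	0x88000000ull	/* must fall inside hw->ranges.runtime */
#define EXAMPLE_RUNTIME_SIZE	SZ_4M

static struct ivpu_bo *example_runtime_bo;

static int example_init_runtime_bo(struct ivpu_device *vdev)
{
	/*
	 * ivpu_bo_create_runtime() validates the address against
	 * vdev->hw->ranges.runtime and allocates from the global context;
	 * ivpu_bo_create() then calls ivpu_bo_bind(), so the pages are
	 * already pinned and mapped in the VPU MMU when this returns.
	 */
	example_runtime_bo = ivpu_bo_create_runtime(vdev, EXAMPLE_RUNTIME_ADDR,
						    EXAMPLE_RUNTIME_SIZE,
						    DRM_IVPU_BO_WC);
	if (!example_runtime_bo)
		return -ENOMEM;

	return 0;
}

static void example_fini_runtime_bo(void)
{
	/*
	 * Dropping the last reference ends up in ivpu_gem_bo_free(), which
	 * unbinds the BO (MMU unmap + DMA unmap) and releases its pages.
	 */
	drm_gem_object_put(&example_runtime_bo->base.base);
	example_runtime_bo = NULL;
}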