author    Dave Airlie <airlied@redhat.com>    2025-03-07 07:03:56 +1000
committer Dave Airlie <airlied@redhat.com>    2025-03-07 07:06:34 +1000
commit    ac3a75bd4205b8856631f57001ebd304e072124e
tree      0d05bb879457bab3e697eb8aea3e2872d134a752
parent    7eb172143d5508b4da468ed59ee857c6e5e01da6
parent    80da96d735094ea22985ced98bc57fe3a4422921
Merge tag 'drm-misc-fixes-2025-03-06' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes
A Kconfig fix for nouveau, locking and timestamp fixes for imagination,
a header guard fix for sched and a DPMS regression fix for bochs.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250306-antelope-of-imminent-anger-bca19e@houat
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_meta.c        |   6
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_trace.c       |   4
-rw-r--r--  drivers/gpu/drm/imagination/pvr_queue.c          |  18
-rw-r--r--  drivers/gpu/drm/imagination/pvr_queue.h          |   4
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.c             | 134
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.h             |   3
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig                  |   1
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler_trace.h  |   4
-rw-r--r--  drivers/gpu/drm/tiny/bochs.c                     |   5
9 files changed, 141 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index c39beb70c317..6d13864851fc 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
static void
pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
- pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
- fw_obj->fw_mm_node.size);
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
}
static bool
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 73707daa4e52..5dbb636d7d4f 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v)
if (sf_id == ROGUE_FW_SF_LAST)
return -EINVAL;
- timestamp = read_fw_trace(trace_seq_data, 1) |
- ((u64)read_fw_trace(trace_seq_data, 2) << 32);
+ timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
+ read_fw_trace(trace_seq_data, 2);
timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
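
[Note: the pvr_fw_trace.c change above swaps which trace word supplies the
upper 32 bits of the timestamp. A minimal sketch of the corrected assembly,
not from the patch (fw_trace_timestamp() is a hypothetical helper; per the
fix, offset 1 carries the high word and offset 2 the low word):

#include <linux/types.h>

/* The cast to u64 must happen before the shift; shifting a bare u32 by
 * 32 would discard the high word entirely.
 */
static inline u64 fw_trace_timestamp(u32 hi_word, u32 lo_word)
{
	return ((u64)hi_word << 32) | lo_word;
}
]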
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index c4f08432882b..43411be930a2 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f)
return PVR_DRIVER_NAME;
}
+static void pvr_queue_fence_release_work(struct work_struct *w)
+{
+ struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
+
+ pvr_context_put(fence->queue->ctx);
+ dma_fence_free(&fence->base);
+}
+
static void pvr_queue_fence_release(struct dma_fence *f)
{
struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+ struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
- pvr_context_put(fence->queue->ctx);
- dma_fence_free(f);
+ queue_work(pvr_dev->sched_wq, &fence->release_work);
}
static const char *
@@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f,
pvr_context_get(queue->ctx);
fence->queue = queue;
+ INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
dma_fence_init(&fence->base, fence_ops,
&fence_ctx->lock, fence_ctx->id,
atomic_inc_return(&fence_ctx->seqno));
@@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
static void
pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
{
- pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
- &queue->job_fence_ctx);
+ if (!fence->ops)
+ pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+ &queue->job_fence_ctx);
}
/**
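
[Note: the pvr_queue.c change above defers fence cleanup to a workqueue.
The dma_fence .release callback runs in whatever context drops the last
reference, which may be atomic, while the cleanup it performs may need to
sleep. A hedged sketch of the general deferred-release pattern, with
my_fence and schedule_work() as illustrative stand-ins for the driver's
pvr_queue_fence and its dedicated sched_wq workqueue:

#include <linux/dma-fence.h>
#include <linux/workqueue.h>

struct my_fence {
	struct dma_fence base;
	struct work_struct release_work;	/* INIT_WORK() at fence init */
};

/* Runs in process context, so sleeping cleanup is safe here. */
static void my_fence_release_work(struct work_struct *w)
{
	struct my_fence *f = container_of(w, struct my_fence, release_work);

	/* ...drop sleeping references (e.g. a context refcount)... */
	dma_fence_free(&f->base);
}

/* dma_fence_ops.release: may be called from atomic context, so it only
 * queues the work and returns immediately.
 */
static void my_fence_release(struct dma_fence *fence)
{
	struct my_fence *f = container_of(fence, struct my_fence, base);

	schedule_work(&f->release_work);
}
]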
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index e06ced69302f..93fe9ac9f58c 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -5,6 +5,7 @@
#define PVR_QUEUE_H
#include <drm/gpu_scheduler.h>
+#include <linux/workqueue.h>
#include "pvr_cccb.h"
#include "pvr_device.h"
@@ -63,6 +64,9 @@ struct pvr_queue_fence {
/** @queue: Queue that created this fence. */
struct pvr_queue *queue;
+
+ /** @release_work: Fence release work structure. */
+ struct work_struct release_work;
};
/**
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 363f885a7098..2896fa7501b1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -293,8 +293,9 @@ err_bind_op_fini:
static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
- struct pvr_vm_context *vm_ctx, u64 device_addr,
- u64 size)
+ struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
int err;
@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
goto err_bind_op_fini;
}
+ bind_op->pvr_obj = pvr_obj;
bind_op->vm_ctx = vm_ctx;
bind_op->device_addr = device_addr;
bind_op->size = size;
@@ -598,20 +600,6 @@ err_free:
}
/**
- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
- * @vm_ctx: Target VM context.
- *
- * This function ensures that no mappings are left dangling by unmapping them
- * all in order of ascending device-virtual address.
- */
-void
-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
-{
- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
- vm_ctx->gpuvm_mgr.mm_range));
-}
-
-/**
* pvr_vm_context_release() - Teardown a VM context.
* @ref_count: Pointer to reference counter of the VM context.
*
@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
- /* Unmap operations don't have an object to lock. */
- if (!pvr_obj)
- return 0;
-
- /* Acquire lock on the GEM being mapped. */
+ /* Acquire lock on the GEM object being mapped/unmapped. */
return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}
@@ -772,8 +756,10 @@ err_cleanup:
}
/**
- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
+ * memory.
* @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
* @device_addr: Virtual device address at the start of the target mapping.
* @size: Size of the target mapping.
*
@@ -784,9 +770,13 @@ err_cleanup:
* * Any error encountered while performing internal operations required to
* destroy the mapping (returned from pvr_vm_gpuva_unmap or
* pvr_vm_gpuva_remap).
+ *
+ * The vm_ctx->lock must be held when calling this function.
*/
-int
-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+static int
+pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
struct pvr_vm_bind_op bind_op = {0};
struct drm_gpuvm_exec vm_exec = {
@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
},
};
- int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
- size);
+ int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
+ device_addr, size);
if (err)
return err;
+ pvr_gem_object_get(pvr_obj);
+
err = drm_gpuvm_exec_lock(&vm_exec);
if (err)
goto err_cleanup;
@@ -818,6 +810,96 @@ err_cleanup:
return err;
}
+/**
+ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
+ * memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
+{
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by drm_gpuva_find,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+
+ va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
+ if (va) {
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range);
+ } else {
+ err = -ENOENT;
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+ mutex_lock(&vm_ctx->lock);
+
+ for (;;) {
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+
+ va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
+ vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range);
+ if (!va)
+ break;
+
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+
+ WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range));
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+}
+
/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
{
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index 79406243617c..b0528dffa7f1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
int pvr_vm_map(struct pvr_vm_context *vm_ctx,
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
u64 device_addr, u64 size);
+int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size);
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
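
[Note: the header above now exposes three unmap entry points:
pvr_vm_unmap_obj() for callers that already hold the backing GEM object,
pvr_vm_unmap() for callers that only know a device-virtual range, and
pvr_vm_unmap_all() for context teardown. A usage sketch, not taken from
the driver (unmap_fw_mapping() and unmap_by_range() are illustrative
wrappers):

#include "pvr_vm.h"

/* Object in hand (as in pvr_meta_vm_unmap() above): pass it through so
 * the unmap path can lock it via drm_exec.
 */
static int unmap_fw_mapping(struct pvr_vm_context *vm_ctx,
			    struct pvr_gem_object *pvr_obj,
			    u64 device_addr, u64 size)
{
	return pvr_vm_unmap_obj(vm_ctx, pvr_obj, device_addr, size);
}

/* Range only: pvr_vm_unmap() resolves the object itself under
 * vm_ctx->lock and returns -ENOENT if nothing is mapped there.
 */
static int unmap_by_range(struct pvr_vm_context *vm_ctx,
			  u64 device_addr, u64 size)
{
	return pvr_vm_unmap(vm_ctx, device_addr, size);
}
]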
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ce840300578d..1050a4617fc1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,6 +4,7 @@ config DRM_NOUVEAU
depends on DRM && PCI && MMU
select IOMMU_API
select FW_LOADER
+ select FW_CACHE if PM_SLEEP
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index c75302ca3427..f56e77e7f6d0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_
#include <linux/stringify.h>
@@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
__entry->seqno)
);
-#endif
+#endif /* _GPU_SCHED_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
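
[Note: the one-character fix above makes the #if test name the same macro
the header #defines; with the mismatch, the body was re-read on every plain
inclusion rather than only when TRACE_HEADER_MULTI_READ requests it. A
generic sketch of the tracepoint-header idiom with a placeholder
_MY_TRACE_H_ guard (all my_trace names are illustrative):

#if !defined(_MY_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _MY_TRACE_H_

#include <linux/tracepoint.h>

/* ... TRACE_EVENT() definitions go here ... */

#endif /* _MY_TRACE_H_ */

/* This part must be outside protection. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE my_trace
#include <trace/define_trace.h>
]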
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index c67e1f906785..8706763af8fb 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -335,8 +335,6 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode
bochs->xres, bochs->yres, bochs->bpp,
bochs->yres_virtual);
- bochs_hw_blank(bochs, false);
-
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
@@ -506,6 +504,9 @@ static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc,
static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
+
+ bochs_hw_blank(bochs, false);
}
static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
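
[Note: the bochs change above moves the unblank out of bochs_hw_setmode(),
which can be skipped when the mode is unchanged, and into the CRTC
atomic_enable hook, which runs on every off-to-on transition, so a
DPMS-off display is reliably unblanked again. A sketch of where the hook
is wired up (table shape only, assuming the usual helper vtable; the
driver's actual table may list additional callbacks):

#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = {
	.atomic_check   = bochs_crtc_helper_atomic_check,
	.atomic_enable  = bochs_crtc_helper_atomic_enable,  /* unblank */
	.atomic_disable = bochs_crtc_helper_atomic_disable,
};
]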