Diffstat (limited to 'drivers/gpu/drm/xe/xe_exec_queue.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_exec_queue.c | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 0beb6388acb0..05a1bd6b55bf 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -1092,3 +1092,27 @@ void xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
 		xe_lrc_update_hwctx_regs_with_address(q->lrc[i]);
 	}
 }
+
+/**
+ * xe_exec_queue_jobs_ring_restore - Re-emit ring commands of requests pending on given queue.
+ * @q: the &xe_exec_queue struct instance
+ */
+void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q)
+{
+	struct xe_gpu_scheduler *sched = &q->guc->sched;
+	struct xe_sched_job *job;
+
+	/*
+	 * This routine is used within VF migration recovery. This means
+	 * using the lock here introduces a restriction: we cannot wait
+	 * for any GFX HW response while the lock is taken.
+	 */
+	spin_lock(&sched->base.job_list_lock);
+	list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+		if (xe_sched_job_is_error(job))
+			continue;
+
+		q->ring_ops->emit_job(job);
+	}
+	spin_unlock(&sched->base.job_list_lock);
+}
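
For context, a hedged sketch of how this helper might be driven from a VF post-migration fixup path: after the saved contexts are rebased onto the new HWSP address with xe_exec_queue_contexts_hwsp_rebase() (the function visible in the context lines above), the still-pending ring commands can be re-emitted per queue. The vf_post_migration_restore_rings() entry point and the xe->queues xarray below are assumptions for illustration only, not part of this patch:

#include <linux/xarray.h>

/*
 * Hypothetical caller (illustration only): walk every exec queue and
 * redo the ring programming after a VF migration. Assumes queues are
 * tracked in an xarray named xe->queues, which is NOT a real xe field.
 */
static void vf_post_migration_restore_rings(struct xe_device *xe, void *scratch)
{
	struct xe_exec_queue *q;
	unsigned long index;

	xa_for_each(&xe->queues, index, q) {
		/* Rebase saved contexts onto the post-migration HWSP address. */
		xe_exec_queue_contexts_hwsp_rebase(q, scratch);
		/* Re-emit ring commands of jobs still on the pending list. */
		xe_exec_queue_jobs_ring_restore(q);
	}
}

Note that, per the comment in the patch, no GuC or GFX HW response may be awaited inside xe_exec_queue_jobs_ring_restore() itself, since job_list_lock is held while the commands are re-emitted.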
